diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..3b31955f20fdad2dcbb3249afaa8fb65d8c72b7e --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,18 @@ +version: 2.1 +orbs: + ci-go: ipfs/ci-go@0.2.9 + +workflows: + version: 2 + test: + jobs: + - ci-go/build + - ci-go/lint + - ci-go/test + - ci-go/test: + race: true + name: "ci-go/test/race" + #- ci-go/benchmark: + # tolerance: 50 + # requires: + # - ci-go/test diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..4b86d7197f9a6f1a988c503defc8451392cd5e55 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: false +contact_links: + - name: Getting Help on IPFS + url: https://ipfs.io/help + about: All information about how and where to get help on IPFS. + - name: IPFS Official Forum + url: https://discuss.ipfs.io + about: Please post general questions, support requests, and discussions here. diff --git a/.github/ISSUE_TEMPLATE/open_an_issue.md b/.github/ISSUE_TEMPLATE/open_an_issue.md new file mode 100644 index 0000000000000000000000000000000000000000..4fcbd00aca0d513c2729d8df4afb5a01fdbe7d02 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/open_an_issue.md @@ -0,0 +1,19 @@ +--- +name: Open an issue +about: Only for actionable issues relevant to this repository. +title: '' +labels: need/triage +assignees: '' + +--- + diff --git a/.github/config.yml b/.github/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..ed26646a0f7cdda6cf10ede2b0b98cac89cf67b0 --- /dev/null +++ b/.github/config.yml @@ -0,0 +1,68 @@ +# Configuration for welcome - https://github.com/behaviorbot/welcome + +# Configuration for new-issue-welcome - https://github.com/behaviorbot/new-issue-welcome +# Comment to be posted to on first time issues +newIssueWelcomeComment: > + Thank you for submitting your first issue to this repository! A maintainer + will be here shortly to triage and review. + + In the meantime, please double-check that you have provided all the + necessary information to make this process easy! Any information that can + help save additional round trips is useful! We currently aim to give + initial feedback within **two business days**. If this does not happen, feel + free to leave a comment. + + Please keep an eye on how this issue will be labeled, as labels give an + overview of priorities, assignments and additional actions requested by the + maintainers: + + - "Priority" labels will show how urgent this is for the team. + - "Status" labels will show if this is ready to be worked on, blocked, or in progress. + - "Need" labels will indicate if additional input or analysis is required. + + Finally, remember to use https://discuss.ipfs.io if you just need general + support. + +# Configuration for new-pr-welcome - https://github.com/behaviorbot/new-pr-welcome +# Comment to be posted to on PRs from first time contributors in your repository +newPRWelcomeComment: > + Thank you for submitting this PR! + + A maintainer will be here shortly to review it. + + We are super grateful, but we are also overloaded! Help us by making sure + that: + + * The context for this PR is clear, with relevant discussion, decisions + and stakeholders linked/mentioned. + + * Your contribution itself is clear (code comments, self-review for the + rest) and in its best form. 
Follow the [code contribution + guidelines](https://github.com/ipfs/community/blob/master/CONTRIBUTING.md#code-contribution-guidelines) + if they apply. + + Getting other community members to do a review would be great help too on + complex PRs (you can ask in the chats/forums). If you are unsure about + something, just leave us a comment. + + Next steps: + + * A maintainer will triage and assign priority to this PR, commenting on + any missing things and potentially assigning a reviewer for high + priority items. + + * The PR gets reviews, discussed and approvals as needed. + + * The PR is merged by maintainers when it has been approved and comments addressed. + + We currently aim to provide initial feedback/triaging within **two business + days**. Please keep an eye on any labelling actions, as these will indicate + priorities and status of your contribution. + + We are very grateful for your contribution! + + +# Configuration for first-pr-merge - https://github.com/behaviorbot/first-pr-merge +# Comment to be posted to on pull requests merged by a first time user +# Currently disabled +#firstPRMergeComment: "" diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..a9a5aecf429fd8a0d81fbd5fd37006bfa498d5c1 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +tmp diff --git a/.gx/lastpubver b/.gx/lastpubver new file mode 100644 index 0000000000000000000000000000000000000000..64adeb1bd5a940a3776a0e4045dbc1fb454ac984 --- /dev/null +++ b/.gx/lastpubver @@ -0,0 +1 @@ +1.1.31: QmVNRbcH1kKEQUVhCsH75kTGUVFMw2b7zEWyFKyfCwmJjo diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..7d5dcac4d22769b1b48501e091ee9750dd053300 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014-2018 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..20619413c9a8dae4704c35164d28a0f47d9b4151
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,11 @@
+gx:
+	go get github.com/whyrusleeping/gx
+	go get github.com/whyrusleeping/gx-go
+
+deps: gx
+	gx --verbose install --global
+	gx-go rewrite
+
+publish:
+	gx-go rewrite --undo
+
diff --git a/README.md b/README.md
index e53dc7e61611a735d08360798d4df5cb98995791..043579fb26b4c5c4b2e0cc4b958209a54412fa60 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,148 @@
-# go-bitswap
+go-bitswap
+==================
+[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io)
+[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/)
+[![Matrix](https://img.shields.io/badge/matrix-%23ipfs%3Amatrix.org-blue.svg?style=flat-square)](https://matrix.to/#/#ipfs:matrix.org)
+[![IRC](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs)
+[![Discord](https://img.shields.io/discord/475789330380488707?color=blueviolet&label=discord&style=flat-square)](https://discord.gg/24fmuwR)
+[![Coverage Status](https://codecov.io/gh/ipfs/go-bitswap/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs/go-bitswap/branch/master)
+[![Build Status](https://circleci.com/gh/ipfs/go-bitswap.svg?style=svg)](https://circleci.com/gh/ipfs/go-bitswap)
+
+> An implementation of the bitswap protocol in go!
+
+## Lead Maintainer
+
+[Dirk McCormick](https://github.com/dirkmc)
+
+## Table of Contents
+
+- [Background](#background)
+- [Install](#install)
+- [Usage](#usage)
+- [Implementation](#implementation)
+- [Contribute](#contribute)
+- [License](#license)
+
+
+## Background
+
+Bitswap is the data trading module for ipfs. It manages requesting and sending
+blocks to and from other peers in the network. Bitswap has two main jobs:
+- to acquire blocks requested by the client from the network
+- to judiciously send blocks in its possession to other peers who want them
+
+Bitswap is a message-based protocol, as opposed to request-response. All messages
+contain wantlists or blocks.
+
+A node sends a wantlist to tell peers which blocks it wants. When a node receives
+a wantlist, it should check which blocks it has from the wantlist, and consider
+sending the matching blocks to the requestor.
+
+When a node receives blocks that it asked for, the node should send out a
+notification called a 'Cancel' to tell its peers that the node no longer
+wants those blocks.
+
+`go-bitswap` provides an implementation of the Bitswap protocol in go.
+
+[Learn more about how Bitswap works](./docs/how-bitswap-works.md)
+
+## Install
+
+`go-bitswap` requires Go >= 1.11 and can be installed using Go modules.
+
+## Usage
+
+### Initializing a Bitswap Exchange
+
+```golang
+import (
+	"context"
+	bitswap "github.com/ipfs/go-bitswap"
+	bsnet "github.com/ipfs/go-bitswap/network"
+	blockstore "github.com/ipfs/go-ipfs-blockstore"
+	"github.com/libp2p/go-libp2p-core/routing"
+	"github.com/libp2p/go-libp2p-core/host"
+)
+
+var ctx context.Context
+var host host.Host
+var router routing.ContentRouting
+var bstore blockstore.Blockstore
+
+network := bsnet.NewFromIpfsHost(host, router)
+exchange := bitswap.New(ctx, network, bstore)
+```
+
+Parameter Notes:
+
+1. `ctx` is just the parent context for all of Bitswap
+2. `network` is a network abstraction provided to Bitswap on top of libp2p & content routing.
+3. `bstore` is an IPFS blockstore
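+
+For quick experimentation, `bstore` can be an in-memory blockstore. This is a
+minimal sketch (the `go-datastore` wiring below is only an illustration, not
+something this package provides):
+
+```golang
+import (
+	datastore "github.com/ipfs/go-datastore"
+	dssync "github.com/ipfs/go-datastore/sync"
+	blockstore "github.com/ipfs/go-ipfs-blockstore"
+)
+
+// A thread-safe, in-memory blockstore, handy for tests and examples.
+bstore := blockstore.NewBlockstore(dssync.MutexWrap(datastore.NewMapDatastore()))
+```
+
+The returned exchange keeps running until its parent context is cancelled or
+`Close()` is called on it, so remember to shut it down when you are done.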
+
+### Get A Block Synchronously
+
+```golang
+var c cid.Cid
+var ctx context.Context
+var exchange bitswap.Bitswap
+
+block, err := exchange.GetBlock(ctx, c)
+```
+
+Parameter Notes:
+
+1. `ctx` is the context for this request, which can be cancelled to cancel the request
+2. `c` is the content ID of the block you're requesting
+
+### Get Several Blocks Asynchronously
+
+```golang
+var cids []cid.Cid
+var ctx context.Context
+var exchange bitswap.Bitswap
+
+blockChannel, err := exchange.GetBlocks(ctx, cids)
+```
+
+Parameter Notes:
+
+1. `ctx` is the context for this request, which can be cancelled to cancel the request
+2. `cids` is a slice of content IDs for the blocks you're requesting
+
+### Get Related Blocks Faster With Sessions
+
+In IPFS, content blocks are often connected to each other through a MerkleDAG. If you know ahead of time that block requests are related, Bitswap can make several optimizations internally in how it requests those blocks in order to get them faster. Bitswap provides a mechanism called a Bitswap Session to manage a series of block requests as part of a single higher-level operation. You should initialize a Bitswap Session any time you intend to make a series of block requests that are related -- and whose responses are likely to come from the same peers.
+
+```golang
+var ctx context.Context
+var cids []cid.Cid
+var exchange bitswap.Bitswap
+
+session := exchange.NewSession(ctx)
+blocksChannel, err := session.GetBlocks(ctx, cids)
+// later
+var relatedCids []cid.Cid
+relatedBlocksChannel, err := session.GetBlocks(ctx, relatedCids)
+```
+
+Note that `NewSession` returns an interface with `GetBlock` and `GetBlocks` methods that have the same signature as the overall Bitswap exchange.
+
+### Tell bitswap a new block was added to the local datastore
+
+```golang
+var blk blocks.Block
+var exchange bitswap.Bitswap
+
+err := exchange.HasBlock(blk)
+```
+
+## Contribute
+
+PRs are welcome!
+
+Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification.
+ +## License + +MIT © Juan Batiz-Benet diff --git a/benchmarks_test.go b/benchmarks_test.go new file mode 100644 index 0000000000000000000000000000000000000000..bd5c0259fa9692fae2614a3737bc3807ef8b8102 --- /dev/null +++ b/benchmarks_test.go @@ -0,0 +1,681 @@ +package bitswap_test + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "math" + "math/rand" + "os" + "strconv" + "sync" + "testing" + "time" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + blocks "gitlab.dms3.io/dms3/go-block-format" + protocol "gitlab.dms3.io/p2p/go-p2p-core/protocol" + + bitswap "gitlab.dms3.io/dms3/go-bitswap" + bssession "gitlab.dms3.io/dms3/go-bitswap/internal/session" + bsnet "gitlab.dms3.io/dms3/go-bitswap/network" + testinstance "gitlab.dms3.io/dms3/go-bitswap/testinstance" + tn "gitlab.dms3.io/dms3/go-bitswap/testnet" + cid "gitlab.dms3.io/dms3/go-cid" + delay "gitlab.dms3.io/dms3/go-dms3-delay" + mockrouting "gitlab.dms3.io/dms3/go-dms3-routing/mock" +) + +type fetchFunc func(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) + +type distFunc func(b *testing.B, provs []testinstance.Instance, blocks []blocks.Block) + +type runStats struct { + DupsRcvd uint64 + BlksRcvd uint64 + MsgSent uint64 + MsgRecd uint64 + Time time.Duration + Name string +} + +var benchmarkLog []runStats + +type bench struct { + name string + nodeCount int + blockCount int + distFn distFunc + fetchFn fetchFunc +} + +var benches = []bench{ + // Fetch from two seed nodes that both have all 100 blocks + // - request one at a time, in series + bench{"3Nodes-AllToAll-OneAtATime", 3, 100, allToAll, oneAtATime}, + // - request all 100 with a single GetBlocks() call + bench{"3Nodes-AllToAll-BigBatch", 3, 100, allToAll, batchFetchAll}, + + // Fetch from two seed nodes, one at a time, where: + // - node A has blocks 0 - 74 + // - node B has blocks 25 - 99 + bench{"3Nodes-Overlap1-OneAtATime", 3, 100, overlap1, oneAtATime}, + + // Fetch from two seed nodes, where: + // - node A has even blocks + // - node B has odd blocks + // - both nodes have every third block + + // - request one at a time, in series + bench{"3Nodes-Overlap3-OneAtATime", 3, 100, overlap2, oneAtATime}, + // - request 10 at a time, in series + bench{"3Nodes-Overlap3-BatchBy10", 3, 100, overlap2, batchFetchBy10}, + // - request all 100 in parallel as individual GetBlock() calls + bench{"3Nodes-Overlap3-AllConcurrent", 3, 100, overlap2, fetchAllConcurrent}, + // - request all 100 with a single GetBlocks() call + bench{"3Nodes-Overlap3-BigBatch", 3, 100, overlap2, batchFetchAll}, + // - request 1, then 10, then 89 blocks (similar to how DMS3 would fetch a file) + bench{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch}, + + // Fetch from nine seed nodes, all nodes have all blocks + // - request one at a time, in series + bench{"10Nodes-AllToAll-OneAtATime", 10, 100, allToAll, oneAtATime}, + // - request 10 at a time, in series + bench{"10Nodes-AllToAll-BatchFetchBy10", 10, 100, allToAll, batchFetchBy10}, + // - request all 100 with a single GetBlocks() call + bench{"10Nodes-AllToAll-BigBatch", 10, 100, allToAll, batchFetchAll}, + // - request all 100 in parallel as individual GetBlock() calls + bench{"10Nodes-AllToAll-AllConcurrent", 10, 100, allToAll, fetchAllConcurrent}, + // - request 1, then 10, then 89 blocks (similar to how DMS3 would fetch a file) + bench{"10Nodes-AllToAll-UnixfsFetch", 10, 100, allToAll, unixfsFileFetch}, + // - follow a typical DMS3 request pattern for 1000 blocks + bench{"10Nodes-AllToAll-UnixfsFetchLarge", 10, 1000, 
allToAll, unixfsFileFetchLarge}, + + // Fetch from nine seed nodes, blocks are distributed randomly across all nodes (no dups) + // - request one at a time, in series + bench{"10Nodes-OnePeerPerBlock-OneAtATime", 10, 100, onePeerPerBlock, oneAtATime}, + // - request all 100 with a single GetBlocks() call + bench{"10Nodes-OnePeerPerBlock-BigBatch", 10, 100, onePeerPerBlock, batchFetchAll}, + // - request 1, then 10, then 89 blocks (similar to how DMS3 would fetch a file) + bench{"10Nodes-OnePeerPerBlock-UnixfsFetch", 10, 100, onePeerPerBlock, unixfsFileFetch}, + + // Fetch from 199 seed nodes, all nodes have all blocks, fetch all 20 blocks with a single GetBlocks() call + bench{"200Nodes-AllToAll-BigBatch", 200, 20, allToAll, batchFetchAll}, +} + +func BenchmarkFixedDelay(b *testing.B) { + benchmarkLog = nil + fixedDelay := delay.Fixed(10 * time.Millisecond) + bstoreLatency := time.Duration(0) + + for _, bch := range benches { + b.Run(bch.name, func(b *testing.B) { + subtestDistributeAndFetch(b, bch.nodeCount, bch.blockCount, fixedDelay, bstoreLatency, bch.distFn, bch.fetchFn) + }) + } + + out, _ := json.MarshalIndent(benchmarkLog, "", " ") + _ = ioutil.WriteFile("tmp/benchmark.json", out, 0666) + printResults(benchmarkLog) +} + +type mixedBench struct { + bench + fetcherCount int // number of nodes that fetch data + oldSeedCount int // number of seed nodes running old version of Bitswap +} + +var mixedBenches = []mixedBench{ + mixedBench{bench{"3Nodes-Overlap3-OneAtATime", 3, 10, overlap2, oneAtATime}, 1, 2}, + mixedBench{bench{"3Nodes-AllToAll-OneAtATime", 3, 10, allToAll, oneAtATime}, 1, 2}, + mixedBench{bench{"3Nodes-Overlap3-AllConcurrent", 3, 10, overlap2, fetchAllConcurrent}, 1, 2}, + // mixedBench{bench{"3Nodes-Overlap3-UnixfsFetch", 3, 100, overlap2, unixfsFileFetch}, 1, 2}, +} + +func BenchmarkFetchFromOldBitswap(b *testing.B) { + benchmarkLog = nil + fixedDelay := delay.Fixed(10 * time.Millisecond) + bstoreLatency := time.Duration(0) + + for _, bch := range mixedBenches { + b.Run(bch.name, func(b *testing.B) { + fetcherCount := bch.fetcherCount + oldSeedCount := bch.oldSeedCount + newSeedCount := bch.nodeCount - (fetcherCount + oldSeedCount) + + net := tn.VirtualNetwork(mockrouting.NewServer(), fixedDelay) + + // Simulate an older Bitswap node (old protocol ID) that doesn't + // send DONT_HAVE responses + oldProtocol := []protocol.ID{bsnet.ProtocolBitswapOneOne} + oldNetOpts := []bsnet.NetOpt{bsnet.SupportedProtocols(oldProtocol)} + oldBsOpts := []bitswap.Option{bitswap.SetSendDontHaves(false)} + oldNodeGenerator := testinstance.NewTestInstanceGenerator(net, oldNetOpts, oldBsOpts) + + // Regular new Bitswap node + newNodeGenerator := testinstance.NewTestInstanceGenerator(net, nil, nil) + var instances []testinstance.Instance + + // Create new nodes (fetchers + seeds) + for i := 0; i < fetcherCount+newSeedCount; i++ { + inst := newNodeGenerator.Next() + instances = append(instances, inst) + } + // Create old nodes (just seeds) + for i := 0; i < oldSeedCount; i++ { + inst := oldNodeGenerator.Next() + instances = append(instances, inst) + } + // Connect all the nodes together + testinstance.ConnectInstances(instances) + + // Generate blocks, with a smaller root block + rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) + blocks := testutil.GenerateBlocksOfSize(bch.blockCount, stdBlockSize) + blocks[0] = rootBlock[0] + + // Run the distribution + runDistributionMulti(b, instances[:fetcherCount], instances[fetcherCount:], blocks, bstoreLatency, bch.distFn, bch.fetchFn) + + 
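+			// Shut down both the old- and new-protocol instance generators now that this run is complete.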
newNodeGenerator.Close() + oldNodeGenerator.Close() + }) + } + + out, _ := json.MarshalIndent(benchmarkLog, "", " ") + _ = ioutil.WriteFile("tmp/benchmark.json", out, 0666) + printResults(benchmarkLog) +} + +const datacenterSpeed = 5 * time.Millisecond +const fastSpeed = 60 * time.Millisecond +const mediumSpeed = 200 * time.Millisecond +const slowSpeed = 800 * time.Millisecond +const superSlowSpeed = 4000 * time.Millisecond +const datacenterDistribution = 3 * time.Millisecond +const distribution = 20 * time.Millisecond +const datacenterBandwidth = 125000000.0 +const datacenterBandwidthDeviation = 3000000.0 +const fastBandwidth = 1250000.0 +const fastBandwidthDeviation = 300000.0 +const mediumBandwidth = 500000.0 +const mediumBandwidthDeviation = 80000.0 +const slowBandwidth = 100000.0 +const slowBandwidthDeviation = 16500.0 +const rootBlockSize = 800 +const stdBlockSize = 8000 +const largeBlockSize = int64(256 * 1024) + +func BenchmarkRealWorld(b *testing.B) { + benchmarkLog = nil + benchmarkSeed, err := strconv.ParseInt(os.Getenv("BENCHMARK_SEED"), 10, 64) + var randomGen *rand.Rand = nil + if err == nil { + randomGen = rand.New(rand.NewSource(benchmarkSeed)) + } + + fastNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( + mediumSpeed-fastSpeed, slowSpeed-fastSpeed, + 0.0, 0.0, distribution, randomGen) + fastNetworkDelay := delay.Delay(fastSpeed, fastNetworkDelayGenerator) + fastBandwidthGenerator := tn.VariableRateLimitGenerator(fastBandwidth, fastBandwidthDeviation, randomGen) + averageNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( + mediumSpeed-fastSpeed, slowSpeed-fastSpeed, + 0.3, 0.3, distribution, randomGen) + averageNetworkDelay := delay.Delay(fastSpeed, averageNetworkDelayGenerator) + averageBandwidthGenerator := tn.VariableRateLimitGenerator(mediumBandwidth, mediumBandwidthDeviation, randomGen) + slowNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( + mediumSpeed-fastSpeed, superSlowSpeed-fastSpeed, + 0.3, 0.3, distribution, randomGen) + slowNetworkDelay := delay.Delay(fastSpeed, slowNetworkDelayGenerator) + slowBandwidthGenerator := tn.VariableRateLimitGenerator(slowBandwidth, slowBandwidthDeviation, randomGen) + bstoreLatency := time.Duration(0) + + b.Run("200Nodes-AllToAll-BigBatch-FastNetwork", func(b *testing.B) { + subtestDistributeAndFetchRateLimited(b, 300, 200, fastNetworkDelay, fastBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) + }) + b.Run("200Nodes-AllToAll-BigBatch-AverageVariableSpeedNetwork", func(b *testing.B) { + subtestDistributeAndFetchRateLimited(b, 300, 200, averageNetworkDelay, averageBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) + }) + b.Run("200Nodes-AllToAll-BigBatch-SlowVariableSpeedNetwork", func(b *testing.B) { + subtestDistributeAndFetchRateLimited(b, 300, 200, slowNetworkDelay, slowBandwidthGenerator, stdBlockSize, bstoreLatency, allToAll, batchFetchAll) + }) + out, _ := json.MarshalIndent(benchmarkLog, "", " ") + _ = ioutil.WriteFile("tmp/rw-benchmark.json", out, 0666) + printResults(benchmarkLog) +} + +func BenchmarkDatacenter(b *testing.B) { + benchmarkLog = nil + benchmarkSeed, err := strconv.ParseInt(os.Getenv("BENCHMARK_SEED"), 10, 64) + var randomGen *rand.Rand = nil + if err == nil { + randomGen = rand.New(rand.NewSource(benchmarkSeed)) + } + + datacenterNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( + fastSpeed-datacenterSpeed, (fastSpeed-datacenterSpeed)/2, + 0.0, 0.0, datacenterDistribution, randomGen) + datacenterNetworkDelay := 
delay.Delay(datacenterSpeed, datacenterNetworkDelayGenerator) + datacenterBandwidthGenerator := tn.VariableRateLimitGenerator(datacenterBandwidth, datacenterBandwidthDeviation, randomGen) + bstoreLatency := time.Millisecond * 25 + + b.Run("3Nodes-Overlap3-UnixfsFetch", func(b *testing.B) { + subtestDistributeAndFetchRateLimited(b, 3, 100, datacenterNetworkDelay, datacenterBandwidthGenerator, largeBlockSize, bstoreLatency, allToAll, unixfsFileFetch) + }) + out, _ := json.MarshalIndent(benchmarkLog, "", " ") + _ = ioutil.WriteFile("tmp/rb-benchmark.json", out, 0666) + printResults(benchmarkLog) +} + +func BenchmarkDatacenterMultiLeechMultiSeed(b *testing.B) { + benchmarkLog = nil + benchmarkSeed, err := strconv.ParseInt(os.Getenv("BENCHMARK_SEED"), 10, 64) + var randomGen *rand.Rand = nil + if err == nil { + randomGen = rand.New(rand.NewSource(benchmarkSeed)) + } + + datacenterNetworkDelayGenerator := tn.InternetLatencyDelayGenerator( + fastSpeed-datacenterSpeed, (fastSpeed-datacenterSpeed)/2, + 0.0, 0.0, datacenterDistribution, randomGen) + datacenterNetworkDelay := delay.Delay(datacenterSpeed, datacenterNetworkDelayGenerator) + datacenterBandwidthGenerator := tn.VariableRateLimitGenerator(datacenterBandwidth, datacenterBandwidthDeviation, randomGen) + bstoreLatency := time.Millisecond * 25 + + b.Run("3Leech3Seed-AllToAll-UnixfsFetch", func(b *testing.B) { + d := datacenterNetworkDelay + rateLimitGenerator := datacenterBandwidthGenerator + blockSize := largeBlockSize + df := allToAll + ff := unixfsFileFetchLarge + numnodes := 6 + numblks := 1000 + + for i := 0; i < b.N; i++ { + net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) + + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + instances := ig.Instances(numnodes) + blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) + runDistributionMulti(b, instances[:3], instances[3:], blocks, bstoreLatency, df, ff) + } + }) + + out, _ := json.MarshalIndent(benchmarkLog, "", " ") + _ = ioutil.WriteFile("tmp/rb-benchmark.json", out, 0666) + printResults(benchmarkLog) +} + +func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { + for i := 0; i < b.N; i++ { + net := tn.VirtualNetwork(mockrouting.NewServer(), d) + + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + + instances := ig.Instances(numnodes) + rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) + blocks := testutil.GenerateBlocksOfSize(numblks, stdBlockSize) + blocks[0] = rootBlock[0] + runDistribution(b, instances, blocks, bstoreLatency, df, ff) + ig.Close() + } +} + +func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d delay.D, rateLimitGenerator tn.RateLimitGenerator, blockSize int64, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { + for i := 0; i < b.N; i++ { + net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) + + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + instances := ig.Instances(numnodes) + rootBlock := testutil.GenerateBlocksOfSize(1, rootBlockSize) + blocks := testutil.GenerateBlocksOfSize(numblks, blockSize) + blocks[0] = rootBlock[0] + runDistribution(b, instances, blocks, bstoreLatency, df, ff) + } +} + +func runDistributionMulti(b *testing.B, fetchers []testinstance.Instance, seeds []testinstance.Instance, blocks []blocks.Block, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { + // Distribute 
blocks to seed nodes + df(b, seeds, blocks) + + // Set the blockstore latency on seed nodes + if bstoreLatency > 0 { + for _, i := range seeds { + i.SetBlockstoreLatency(bstoreLatency) + } + } + + // Fetch blocks (from seed nodes to leech nodes) + var ks []cid.Cid + for _, blk := range blocks { + ks = append(ks, blk.Cid()) + } + + start := time.Now() + var wg sync.WaitGroup + for _, fetcher := range fetchers { + wg.Add(1) + + go func(ftchr testinstance.Instance) { + defer wg.Done() + + ff(b, ftchr.Exchange, ks) + }(fetcher) + } + wg.Wait() + + // Collect statistics + fetcher := fetchers[0] + st, err := fetcher.Exchange.Stat() + if err != nil { + b.Fatal(err) + } + + for _, fetcher := range fetchers { + nst := fetcher.Adapter.Stats() + stats := runStats{ + Time: time.Since(start), + MsgRecd: nst.MessagesRecvd, + MsgSent: nst.MessagesSent, + DupsRcvd: st.DupBlksReceived, + BlksRcvd: st.BlocksReceived, + Name: b.Name(), + } + benchmarkLog = append(benchmarkLog, stats) + } + // b.Logf("send/recv: %d / %d (dups: %d)", nst.MessagesSent, nst.MessagesRecvd, st.DupBlksReceived) +} + +func runDistribution(b *testing.B, instances []testinstance.Instance, blocks []blocks.Block, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { + numnodes := len(instances) + fetcher := instances[numnodes-1] + + // Distribute blocks to seed nodes + seeds := instances[:numnodes-1] + df(b, seeds, blocks) + + // Set the blockstore latency on seed nodes + if bstoreLatency > 0 { + for _, i := range seeds { + i.SetBlockstoreLatency(bstoreLatency) + } + } + + // Fetch blocks (from seed nodes to leech nodes) + var ks []cid.Cid + for _, blk := range blocks { + ks = append(ks, blk.Cid()) + } + + start := time.Now() + ff(b, fetcher.Exchange, ks) + + // Collect statistics + st, err := fetcher.Exchange.Stat() + if err != nil { + b.Fatal(err) + } + + nst := fetcher.Adapter.Stats() + stats := runStats{ + Time: time.Since(start), + MsgRecd: nst.MessagesRecvd, + MsgSent: nst.MessagesSent, + DupsRcvd: st.DupBlksReceived, + BlksRcvd: st.BlocksReceived, + Name: b.Name(), + } + benchmarkLog = append(benchmarkLog, stats) + // b.Logf("send/recv: %d / %d (dups: %d)", nst.MessagesSent, nst.MessagesRecvd, st.DupBlksReceived) +} + +func allToAll(b *testing.B, provs []testinstance.Instance, blocks []blocks.Block) { + for _, p := range provs { + if err := p.Blockstore().PutMany(blocks); err != nil { + b.Fatal(err) + } + } +} + +// overlap1 gives the first 75 blocks to the first peer, and the last 75 blocks +// to the second peer. This means both peers have the middle 50 blocks +func overlap1(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) { + if len(provs) != 2 { + b.Fatal("overlap1 only works with 2 provs") + } + bill := provs[0] + jeff := provs[1] + + if err := bill.Blockstore().PutMany(blks[:75]); err != nil { + b.Fatal(err) + } + if err := jeff.Blockstore().PutMany(blks[25:]); err != nil { + b.Fatal(err) + } +} + +// overlap2 gives every even numbered block to the first peer, odd numbered +// blocks to the second. 
it also gives every third block to both peers +func overlap2(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) { + if len(provs) != 2 { + b.Fatal("overlap2 only works with 2 provs") + } + bill := provs[0] + jeff := provs[1] + + for i, blk := range blks { + even := i%2 == 0 + third := i%3 == 0 + if third || even { + if err := bill.Blockstore().Put(blk); err != nil { + b.Fatal(err) + } + } + if third || !even { + if err := jeff.Blockstore().Put(blk); err != nil { + b.Fatal(err) + } + } + } +} + +// onePeerPerBlock picks a random peer to hold each block +// with this layout, we shouldnt actually ever see any duplicate blocks +// but we're mostly just testing performance of the sync algorithm +func onePeerPerBlock(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) { + for _, blk := range blks { + err := provs[rand.Intn(len(provs))].Blockstore().Put(blk) + if err != nil { + b.Fatal(err) + } + } +} + +func oneAtATime(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { + ses := bs.NewSession(context.Background()).(*bssession.Session) + for _, c := range ks { + _, err := ses.GetBlock(context.Background(), c) + if err != nil { + b.Fatal(err) + } + } + // b.Logf("Session fetch latency: %s", ses.GetAverageLatency()) +} + +// fetch data in batches, 10 at a time +func batchFetchBy10(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { + ses := bs.NewSession(context.Background()) + for i := 0; i < len(ks); i += 10 { + out, err := ses.GetBlocks(context.Background(), ks[i:i+10]) + if err != nil { + b.Fatal(err) + } + for range out { + } + } +} + +// fetch each block at the same time concurrently +func fetchAllConcurrent(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { + ses := bs.NewSession(context.Background()) + + var wg sync.WaitGroup + for _, c := range ks { + wg.Add(1) + go func(c cid.Cid) { + defer wg.Done() + _, err := ses.GetBlock(context.Background(), c) + if err != nil { + b.Error(err) + } + }(c) + } + wg.Wait() +} + +func batchFetchAll(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { + ses := bs.NewSession(context.Background()) + out, err := ses.GetBlocks(context.Background(), ks) + if err != nil { + b.Fatal(err) + } + for range out { + } +} + +// simulates the fetch pattern of trying to sync a unixfs file graph as fast as possible +func unixfsFileFetch(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { + ses := bs.NewSession(context.Background()) + _, err := ses.GetBlock(context.Background(), ks[0]) + if err != nil { + b.Fatal(err) + } + + out, err := ses.GetBlocks(context.Background(), ks[1:11]) + if err != nil { + b.Fatal(err) + } + for range out { + } + + out, err = ses.GetBlocks(context.Background(), ks[11:]) + if err != nil { + b.Fatal(err) + } + for range out { + } +} + +func unixfsFileFetchLarge(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) { + ses := bs.NewSession(context.Background()) + _, err := ses.GetBlock(context.Background(), ks[0]) + if err != nil { + b.Fatal(err) + } + + out, err := ses.GetBlocks(context.Background(), ks[1:11]) + if err != nil { + b.Fatal(err) + } + for range out { + } + + out, err = ses.GetBlocks(context.Background(), ks[11:100]) + if err != nil { + b.Fatal(err) + } + for range out { + } + + rest := ks[100:] + for len(rest) > 0 { + var batch [][]cid.Cid + for i := 0; i < 5 && len(rest) > 0; i++ { + cnt := 10 + if len(rest) < 10 { + cnt = len(rest) + } + group := rest[:cnt] + rest = rest[cnt:] + batch = append(batch, group) + } + + var anyErr error + var wg sync.WaitGroup + for _, group := range batch { + wg.Add(1) + go 
func(grp []cid.Cid) { + defer wg.Done() + + out, err = ses.GetBlocks(context.Background(), grp) + if err != nil { + anyErr = err + } + for range out { + } + }(group) + } + wg.Wait() + + // Note: b.Fatal() cannot be called from within a go-routine + if anyErr != nil { + b.Fatal(anyErr) + } + } +} + +func printResults(rs []runStats) { + nameOrder := make([]string, 0) + names := make(map[string]struct{}) + for i := 0; i < len(rs); i++ { + if _, ok := names[rs[i].Name]; !ok { + nameOrder = append(nameOrder, rs[i].Name) + names[rs[i].Name] = struct{}{} + } + } + + for i := 0; i < len(names); i++ { + name := nameOrder[i] + count := 0 + sent := 0.0 + rcvd := 0.0 + dups := 0.0 + blks := 0.0 + elpd := 0.0 + for i := 0; i < len(rs); i++ { + if rs[i].Name == name { + count++ + sent += float64(rs[i].MsgSent) + rcvd += float64(rs[i].MsgRecd) + dups += float64(rs[i].DupsRcvd) + blks += float64(rs[i].BlksRcvd) + elpd += float64(rs[i].Time) + } + } + sent /= float64(count) + rcvd /= float64(count) + dups /= float64(count) + blks /= float64(count) + + label := fmt.Sprintf("%s (%d runs / %.2fs):", name, count, elpd/1000000000.0) + fmt.Printf("%-75s %s: sent %d, recv %d, dups %d / %d\n", + label, + fmtDuration(time.Duration(int64(math.Round(elpd/float64(count))))), + int64(math.Round(sent)), int64(math.Round(rcvd)), + int64(math.Round(dups)), int64(math.Round(blks))) + } +} + +func fmtDuration(d time.Duration) string { + d = d.Round(time.Millisecond) + s := d / time.Second + d -= s * time.Second + ms := d / time.Millisecond + return fmt.Sprintf("%d.%03ds", s, ms) +} diff --git a/bitswap.go b/bitswap.go new file mode 100644 index 0000000000000000000000000000000000000000..41a089efff301c90c83bcc05bdb6f5dbc5579eb2 --- /dev/null +++ b/bitswap.go @@ -0,0 +1,582 @@ +// Package bitswap implements the DMS3 exchange interface with the BitSwap +// bilateral exchange protocol. +package bitswap + +import ( + "context" + "errors" + "fmt" + + "sync" + "time" + + delay "gitlab.dms3.io/dms3/go-dms3-delay" + + process "github.com/jbenet/goprocess" + procctx "github.com/jbenet/goprocess/context" + deciface "gitlab.dms3.io/dms3/go-bitswap/decision" + bsbpm "gitlab.dms3.io/dms3/go-bitswap/internal/blockpresencemanager" + decision "gitlab.dms3.io/dms3/go-bitswap/internal/decision" + bsgetter "gitlab.dms3.io/dms3/go-bitswap/internal/getter" + bsmq "gitlab.dms3.io/dms3/go-bitswap/internal/messagequeue" + notifications "gitlab.dms3.io/dms3/go-bitswap/internal/notifications" + bspm "gitlab.dms3.io/dms3/go-bitswap/internal/peermanager" + bspqm "gitlab.dms3.io/dms3/go-bitswap/internal/providerquerymanager" + bssession "gitlab.dms3.io/dms3/go-bitswap/internal/session" + bssim "gitlab.dms3.io/dms3/go-bitswap/internal/sessioninterestmanager" + bssm "gitlab.dms3.io/dms3/go-bitswap/internal/sessionmanager" + bsspm "gitlab.dms3.io/dms3/go-bitswap/internal/sessionpeermanager" + bsmsg "gitlab.dms3.io/dms3/go-bitswap/message" + bsnet "gitlab.dms3.io/dms3/go-bitswap/network" + blocks "gitlab.dms3.io/dms3/go-block-format" + cid "gitlab.dms3.io/dms3/go-cid" + blockstore "gitlab.dms3.io/dms3/go-dms3-blockstore" + exchange "gitlab.dms3.io/dms3/go-dms3-exchange-interface" + logging "gitlab.dms3.io/dms3/go-log" + metrics "gitlab.dms3.io/dms3/go-metrics-interface" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +var log = logging.Logger("bitswap") +var sflog = log.Desugar() + +var _ exchange.SessionExchange = (*Bitswap)(nil) + +const ( + // these requests take at _least_ two minutes at the moment. 
+ provideTimeout = time.Minute * 3 + defaultProvSearchDelay = time.Second + + // Number of concurrent workers in decision engine that process requests to the blockstore + defaulEngineBlockstoreWorkerCount = 128 +) + +var ( + // HasBlockBufferSize is the buffer size of the channel for new blocks + // that need to be provided. They should get pulled over by the + // provideCollector even before they are actually provided. + // TODO: Does this need to be this large givent that? + HasBlockBufferSize = 256 + provideKeysBufferSize = 2048 + provideWorkerMax = 6 + + // the 1<<18+15 is to observe old file chunks that are 1<<18 + 14 in size + metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} +) + +// Option defines the functional option type that can be used to configure +// bitswap instances +type Option func(*Bitswap) + +// ProvideEnabled is an option for enabling/disabling provide announcements +func ProvideEnabled(enabled bool) Option { + return func(bs *Bitswap) { + bs.provideEnabled = enabled + } +} + +// ProviderSearchDelay overwrites the global provider search delay +func ProviderSearchDelay(newProvSearchDelay time.Duration) Option { + return func(bs *Bitswap) { + bs.provSearchDelay = newProvSearchDelay + } +} + +// RebroadcastDelay overwrites the global provider rebroadcast delay +func RebroadcastDelay(newRebroadcastDelay delay.D) Option { + return func(bs *Bitswap) { + bs.rebroadcastDelay = newRebroadcastDelay + } +} + +// EngineBlockstoreWorkerCount sets the number of worker threads used for +// blockstore operations in the decision engine +func EngineBlockstoreWorkerCount(count int) Option { + if count <= 0 { + panic(fmt.Sprintf("Engine blockstore worker count is %d but must be > 0", count)) + } + return func(bs *Bitswap) { + bs.engineBstoreWorkerCount = count + } +} + +// SetSendDontHaves indicates what to do when the engine receives a want-block +// for a block that is not in the blockstore. Either +// - Send a DONT_HAVE message +// - Simply don't respond +// This option is only used for testing. +func SetSendDontHaves(send bool) Option { + return func(bs *Bitswap) { + bs.engine.SetSendDontHaves(send) + } +} + +// Configures the engine to use the given score decision logic. +func WithScoreLedger(scoreLedger deciface.ScoreLedger) Option { + return func(bs *Bitswap) { + bs.engineScoreLedger = scoreLedger + } +} + +// New initializes a BitSwap instance that communicates over the provided +// BitSwapNetwork. This function registers the returned instance as the network +// delegate. Runs until context is cancelled or bitswap.Close is called. +func New(parent context.Context, network bsnet.BitSwapNetwork, + bstore blockstore.Blockstore, options ...Option) exchange.Interface { + + // important to use provided parent context (since it may include important + // loggable data). It's probably not a good idea to allow bitswap to be + // coupled to the concerns of the dms3 daemon in this way. + // + // FIXME(btc) Now that bitswap manages itself using a process, it probably + // shouldn't accept a context anymore. Clients should probably use Close() + // exclusively. 
We should probably find another way to share logging data + ctx, cancelFunc := context.WithCancel(parent) + ctx = metrics.CtxSubScope(ctx, "bitswap") + dupHist := metrics.NewCtx(ctx, "recv_dup_blocks_bytes", "Summary of duplicate"+ + " data blocks recived").Histogram(metricsBuckets) + allHist := metrics.NewCtx(ctx, "recv_all_blocks_bytes", "Summary of all"+ + " data blocks recived").Histogram(metricsBuckets) + + sentHistogram := metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by"+ + " this bitswap").Histogram(metricsBuckets) + + px := process.WithTeardown(func() error { + return nil + }) + + // onDontHaveTimeout is called when a want-block is sent to a peer that + // has an old version of Bitswap that doesn't support DONT_HAVE messages, + // or when no response is received within a timeout. + var sm *bssm.SessionManager + onDontHaveTimeout := func(p peer.ID, dontHaves []cid.Cid) { + // Simulate a message arriving with DONT_HAVEs + sm.ReceiveFrom(ctx, p, nil, nil, dontHaves) + } + peerQueueFactory := func(ctx context.Context, p peer.ID) bspm.PeerQueue { + return bsmq.New(ctx, p, network, onDontHaveTimeout) + } + + sim := bssim.New() + bpm := bsbpm.New() + pm := bspm.New(ctx, peerQueueFactory, network.Self()) + pqm := bspqm.New(ctx, network) + + sessionFactory := func( + sessctx context.Context, + sessmgr bssession.SessionManager, + id uint64, + spm bssession.SessionPeerManager, + sim *bssim.SessionInterestManager, + pm bssession.PeerManager, + bpm *bsbpm.BlockPresenceManager, + notif notifications.PubSub, + provSearchDelay time.Duration, + rebroadcastDelay delay.D, + self peer.ID) bssm.Session { + return bssession.New(sessctx, sessmgr, id, spm, pqm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) + } + sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.SessionPeerManager { + return bsspm.New(id, network.ConnectionManager()) + } + notif := notifications.New() + sm = bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) + + bs := &Bitswap{ + blockstore: bstore, + network: network, + process: px, + newBlocks: make(chan cid.Cid, HasBlockBufferSize), + provideKeys: make(chan cid.Cid, provideKeysBufferSize), + pm: pm, + pqm: pqm, + sm: sm, + sim: sim, + notif: notif, + counters: new(counters), + dupMetric: dupHist, + allMetric: allHist, + sentHistogram: sentHistogram, + provideEnabled: true, + provSearchDelay: defaultProvSearchDelay, + rebroadcastDelay: delay.Fixed(time.Minute), + engineBstoreWorkerCount: defaulEngineBlockstoreWorkerCount, + } + + // apply functional options before starting and running bitswap + for _, option := range options { + option(bs) + } + + // Set up decision engine + bs.engine = decision.NewEngine(bstore, bs.engineBstoreWorkerCount, network.ConnectionManager(), network.Self(), bs.engineScoreLedger) + + bs.pqm.Startup() + network.SetDelegate(bs) + + // Start up bitswaps async worker routines + bs.startWorkers(ctx, px) + bs.engine.StartWorkers(ctx, px) + + // bind the context and process. + // do it over here to avoid closing before all setup is done. + go func() { + <-px.Closing() // process closes first + sm.Shutdown() + cancelFunc() + notif.Shutdown() + }() + procctx.CloseAfterContext(px, ctx) // parent cancelled first + + return bs +} + +// Bitswap instances implement the bitswap protocol. 
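+// An instance is constructed with New, which registers it as the network
+// delegate, and shut down with Close.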
+type Bitswap struct { + pm *bspm.PeerManager + + // the provider query manager manages requests to find providers + pqm *bspqm.ProviderQueryManager + + // the engine is the bit of logic that decides who to send which blocks to + engine *decision.Engine + + // network delivers messages on behalf of the session + network bsnet.BitSwapNetwork + + // blockstore is the local database + // NB: ensure threadsafety + blockstore blockstore.Blockstore + + // manages channels of outgoing blocks for sessions + notif notifications.PubSub + + // newBlocks is a channel for newly added blocks to be provided to the + // network. blocks pushed down this channel get buffered and fed to the + // provideKeys channel later on to avoid too much network activity + newBlocks chan cid.Cid + // provideKeys directly feeds provide workers + provideKeys chan cid.Cid + + process process.Process + + // Counters for various statistics + counterLk sync.Mutex + counters *counters + + // Metrics interface metrics + dupMetric metrics.Histogram + allMetric metrics.Histogram + sentHistogram metrics.Histogram + + // External statistics interface + wiretap WireTap + + // the SessionManager routes requests to interested sessions + sm *bssm.SessionManager + + // the SessionInterestManager keeps track of which sessions are interested + // in which CIDs + sim *bssim.SessionInterestManager + + // whether or not to make provide announcements + provideEnabled bool + + // how long to wait before looking for providers in a session + provSearchDelay time.Duration + + // how often to rebroadcast providing requests to find more optimized providers + rebroadcastDelay delay.D + + // how many worker threads to start for decision engine blockstore worker + engineBstoreWorkerCount int + + // the score ledger used by the decision engine + engineScoreLedger deciface.ScoreLedger +} + +type counters struct { + blocksRecvd uint64 + dupBlocksRecvd uint64 + dupDataRecvd uint64 + blocksSent uint64 + dataSent uint64 + dataRecvd uint64 + messagesRecvd uint64 +} + +// GetBlock attempts to retrieve a particular block from peers within the +// deadline enforced by the context. +func (bs *Bitswap) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, error) { + return bsgetter.SyncGetBlock(parent, k, bs.GetBlocks) +} + +// WantlistForPeer returns the currently understood list of blocks requested by a +// given peer. +func (bs *Bitswap) WantlistForPeer(p peer.ID) []cid.Cid { + var out []cid.Cid + for _, e := range bs.engine.WantlistForPeer(p) { + out = append(out, e.Cid) + } + return out +} + +// LedgerForPeer returns aggregated data about blocks swapped and communication +// with a given peer. +func (bs *Bitswap) LedgerForPeer(p peer.ID) *decision.Receipt { + return bs.engine.LedgerForPeer(p) +} + +// GetBlocks returns a channel where the caller may receive blocks that +// correspond to the provided |keys|. Returns an error if BitSwap is unable to +// begin this request within the deadline enforced by the context. +// +// NB: Your request remains open until the context expires. To conserve +// resources, provide a context with a reasonably short deadline (ie. not one +// that lasts throughout the lifetime of the server) +func (bs *Bitswap) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { + session := bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay) + return session.GetBlocks(ctx, keys) +} + +// HasBlock announces the existence of a block to this bitswap service. The +// service will potentially notify its peers. 
+func (bs *Bitswap) HasBlock(blk blocks.Block) error { + return bs.receiveBlocksFrom(context.Background(), "", []blocks.Block{blk}, nil, nil) +} + +// TODO: Some of this stuff really only needs to be done when adding a block +// from the user, not when receiving it from the network. +// In case you run `git blame` on this comment, I'll save you some time: ask +// @whyrusleeping, I don't know the answers you seek. +func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block, haves []cid.Cid, dontHaves []cid.Cid) error { + select { + case <-bs.process.Closing(): + return errors.New("bitswap is closed") + default: + } + + wanted := blks + + // If blocks came from the network + if from != "" { + var notWanted []blocks.Block + wanted, notWanted = bs.sim.SplitWantedUnwanted(blks) + for _, b := range notWanted { + log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), from) + } + } + + // Put wanted blocks into blockstore + if len(wanted) > 0 { + err := bs.blockstore.PutMany(wanted) + if err != nil { + log.Errorf("Error writing %d blocks to datastore: %s", len(wanted), err) + return err + } + } + + // NOTE: There exists the possiblity for a race condition here. If a user + // creates a node, then adds it to the dagservice while another goroutine + // is waiting on a GetBlock for that object, they will receive a reference + // to the same node. We should address this soon, but i'm not going to do + // it now as it requires more thought and isnt causing immediate problems. + + allKs := make([]cid.Cid, 0, len(blks)) + for _, b := range blks { + allKs = append(allKs, b.Cid()) + } + + // If the message came from the network + if from != "" { + // Inform the PeerManager so that we can calculate per-peer latency + combined := make([]cid.Cid, 0, len(allKs)+len(haves)+len(dontHaves)) + combined = append(combined, allKs...) + combined = append(combined, haves...) + combined = append(combined, dontHaves...) + bs.pm.ResponseReceived(from, combined) + } + + // Send all block keys (including duplicates) to any sessions that want them. + // (The duplicates are needed by sessions for accounting purposes) + bs.sm.ReceiveFrom(ctx, from, allKs, haves, dontHaves) + + // Send wanted blocks to decision engine + bs.engine.ReceiveFrom(from, wanted, haves) + + // Publish the block to any Bitswap clients that had requested blocks. + // (the sessions use this pubsub mechanism to inform clients of incoming + // blocks) + for _, b := range wanted { + bs.notif.Publish(b) + } + + // If the reprovider is enabled, send wanted blocks to reprovider + if bs.provideEnabled { + for _, blk := range wanted { + select { + case bs.newBlocks <- blk.Cid(): + // send block off to be reprovided + case <-bs.process.Closing(): + return bs.process.Close() + } + } + } + + if from != "" { + for _, b := range wanted { + log.Debugw("Bitswap.GetBlockRequest.End", "cid", b.Cid()) + } + } + + return nil +} + +// ReceiveMessage is called by the network interface when a new message is +// received. +func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { + bs.counterLk.Lock() + bs.counters.messagesRecvd++ + bs.counterLk.Unlock() + + // This call records changes to wantlists, blocks received, + // and number of bytes transfered. + bs.engine.MessageReceived(ctx, p, incoming) + // TODO: this is bad, and could be easily abused. 
+ // Should only track *useful* messages in ledger + + if bs.wiretap != nil { + bs.wiretap.MessageReceived(p, incoming) + } + + iblocks := incoming.Blocks() + + if len(iblocks) > 0 { + bs.updateReceiveCounters(iblocks) + for _, b := range iblocks { + log.Debugf("[recv] block; cid=%s, peer=%s", b.Cid(), p) + } + } + + haves := incoming.Haves() + dontHaves := incoming.DontHaves() + if len(iblocks) > 0 || len(haves) > 0 || len(dontHaves) > 0 { + // Process blocks + err := bs.receiveBlocksFrom(ctx, p, iblocks, haves, dontHaves) + if err != nil { + log.Warnf("ReceiveMessage recvBlockFrom error: %s", err) + return + } + } +} + +func (bs *Bitswap) updateReceiveCounters(blocks []blocks.Block) { + // Check which blocks are in the datastore + // (Note: any errors from the blockstore are simply logged out in + // blockstoreHas()) + blocksHas := bs.blockstoreHas(blocks) + + bs.counterLk.Lock() + defer bs.counterLk.Unlock() + + // Do some accounting for each block + for i, b := range blocks { + has := blocksHas[i] + + blkLen := len(b.RawData()) + bs.allMetric.Observe(float64(blkLen)) + if has { + bs.dupMetric.Observe(float64(blkLen)) + } + + c := bs.counters + + c.blocksRecvd++ + c.dataRecvd += uint64(blkLen) + if has { + c.dupBlocksRecvd++ + c.dupDataRecvd += uint64(blkLen) + } + } +} + +func (bs *Bitswap) blockstoreHas(blks []blocks.Block) []bool { + res := make([]bool, len(blks)) + + wg := sync.WaitGroup{} + for i, block := range blks { + wg.Add(1) + go func(i int, b blocks.Block) { + defer wg.Done() + + has, err := bs.blockstore.Has(b.Cid()) + if err != nil { + log.Infof("blockstore.Has error: %s", err) + has = false + } + + res[i] = has + }(i, block) + } + wg.Wait() + + return res +} + +// PeerConnected is called by the network interface +// when a peer initiates a new connection to bitswap. +func (bs *Bitswap) PeerConnected(p peer.ID) { + bs.pm.Connected(p) + bs.engine.PeerConnected(p) +} + +// PeerDisconnected is called by the network interface when a peer +// closes a connection +func (bs *Bitswap) PeerDisconnected(p peer.ID) { + bs.pm.Disconnected(p) + bs.engine.PeerDisconnected(p) +} + +// ReceiveError is called by the network interface when an error happens +// at the network layer. Currently just logs error. +func (bs *Bitswap) ReceiveError(err error) { + log.Infof("Bitswap ReceiveError: %s", err) + // TODO log the network error + // TODO bubble the network error up to the parent context/error logger +} + +// Close is called to shutdown Bitswap +func (bs *Bitswap) Close() error { + return bs.process.Close() +} + +// GetWantlist returns the current local wantlist (both want-blocks and +// want-haves). +func (bs *Bitswap) GetWantlist() []cid.Cid { + return bs.pm.CurrentWants() +} + +// GetWantBlocks returns the current list of want-blocks. +func (bs *Bitswap) GetWantBlocks() []cid.Cid { + return bs.pm.CurrentWantBlocks() +} + +// GetWanthaves returns the current list of want-haves. +func (bs *Bitswap) GetWantHaves() []cid.Cid { + return bs.pm.CurrentWantHaves() +} + +// IsOnline is needed to match go-dms3-exchange-interface +func (bs *Bitswap) IsOnline() bool { + return true +} + +// NewSession generates a new Bitswap session. You should use this, rather +// that calling Bitswap.GetBlocks, any time you intend to do several related +// block requests in a row. The session returned will have it's own GetBlocks +// method, but the session will use the fact that the requests are related to +// be more efficient in its requests to peers. 
If you are using a session +// from go-blockservice, it will create a bitswap session automatically. +func (bs *Bitswap) NewSession(ctx context.Context) exchange.Fetcher { + return bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay) +} diff --git a/bitswap_test.go b/bitswap_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ad0c2a112278b372487a9defb05917530bd36cd4 --- /dev/null +++ b/bitswap_test.go @@ -0,0 +1,1018 @@ +package bitswap_test + +import ( + "bytes" + "context" + "fmt" + "sync" + "testing" + "time" + + bitswap "gitlab.dms3.io/dms3/go-bitswap" + deciface "gitlab.dms3.io/dms3/go-bitswap/decision" + decision "gitlab.dms3.io/dms3/go-bitswap/internal/decision" + bssession "gitlab.dms3.io/dms3/go-bitswap/internal/session" + bsmsg "gitlab.dms3.io/dms3/go-bitswap/message" + pb "gitlab.dms3.io/dms3/go-bitswap/message/pb" + testinstance "gitlab.dms3.io/dms3/go-bitswap/testinstance" + tn "gitlab.dms3.io/dms3/go-bitswap/testnet" + blocks "gitlab.dms3.io/dms3/go-block-format" + cid "gitlab.dms3.io/dms3/go-cid" + detectrace "gitlab.dms3.io/dms3/go-detect-race" + blockstore "gitlab.dms3.io/dms3/go-dms3-blockstore" + blocksutil "gitlab.dms3.io/dms3/go-dms3-blocksutil" + delay "gitlab.dms3.io/dms3/go-dms3-delay" + mockrouting "gitlab.dms3.io/dms3/go-dms3-routing/mock" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" + p2ptestutil "gitlab.dms3.io/p2p/go-p2p-netutil" + travis "gitlab.dms3.io/p2p/go-p2p-testing/ci/travis" + tu "gitlab.dms3.io/p2p/go-p2p-testing/etc" +) + +// FIXME the tests are really sensitive to the network delay. fix them to work +// well under varying conditions +const kNetworkDelay = 0 * time.Millisecond + +func getVirtualNetwork() tn.Network { + return tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) +} + +func TestClose(t *testing.T) { + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + block := bgen.Next() + bitswap := ig.Next() + + bitswap.Exchange.Close() + _, err := bitswap.Exchange.GetBlock(context.Background(), block.Cid()) + if err == nil { + t.Fatal("expected GetBlock to fail") + } +} + +func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this + + rs := mockrouting.NewServer() + net := tn.VirtualNetwork(rs, delay.Fixed(kNetworkDelay)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + block := blocks.NewBlock([]byte("block")) + pinfo := p2ptestutil.RandTestBogusIdentityOrFatal(t) + err := rs.Client(pinfo).Provide(context.Background(), block.Cid(), true) // but not on network + if err != nil { + t.Fatal(err) + } + + solo := ig.Next() + defer solo.Exchange.Close() + + ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) + defer cancel() + _, err = solo.Exchange.GetBlock(ctx, block.Cid()) + + if err != context.DeadlineExceeded { + t.Fatal("Expected DeadlineExceeded error") + } +} + +func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { + + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + block := blocks.NewBlock([]byte("block")) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + peers := ig.Instances(2) + hasBlock := peers[0] + defer hasBlock.Exchange.Close() + + if err := hasBlock.Exchange.HasBlock(block); err != nil { + t.Fatal(err) + } + + wantsBlock := peers[1] + defer wantsBlock.Exchange.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 
time.Second) + defer cancel() + received, err := wantsBlock.Exchange.GetBlock(ctx, block.Cid()) + if err != nil { + t.Log(err) + t.Fatal("Expected to succeed") + } + + if !bytes.Equal(block.RawData(), received.RawData()) { + t.Fatal("Data doesn't match") + } +} + +func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + block := blocks.NewBlock([]byte("block")) + bsOpts := []bitswap.Option{bitswap.ProvideEnabled(false), bitswap.ProviderSearchDelay(50 * time.Millisecond)} + ig := testinstance.NewTestInstanceGenerator(net, nil, bsOpts) + defer ig.Close() + + hasBlock := ig.Next() + defer hasBlock.Exchange.Close() + + wantsBlock := ig.Next() + defer wantsBlock.Exchange.Close() + + if err := hasBlock.Exchange.HasBlock(block); err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Millisecond) + defer cancel() + + ns := wantsBlock.Exchange.NewSession(ctx).(*bssession.Session) + + received, err := ns.GetBlock(ctx, block.Cid()) + if received != nil { + t.Fatalf("Expected to find nothing, found %s", received) + } + + if err != context.DeadlineExceeded { + t.Fatal("Expected deadline exceeded") + } +} + +// Tests that a received block is not stored in the blockstore if the block was +// not requested by the client +func TestUnwantedBlockNotAdded(t *testing.T) { + + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + block := blocks.NewBlock([]byte("block")) + bsMessage := bsmsg.New(true) + bsMessage.AddBlock(block) + + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + peers := ig.Instances(2) + hasBlock := peers[0] + defer hasBlock.Exchange.Close() + + if err := hasBlock.Exchange.HasBlock(block); err != nil { + t.Fatal(err) + } + + doesNotWantBlock := peers[1] + defer doesNotWantBlock.Exchange.Close() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + doesNotWantBlock.Exchange.ReceiveMessage(ctx, hasBlock.Peer, bsMessage) + + blockInStore, err := doesNotWantBlock.Blockstore().Has(block.Cid()) + if err != nil || blockInStore { + t.Fatal("Unwanted block added to block store") + } +} + +// Tests that a received block is returned to the client and stored in the +// blockstore in the following scenario: +// - the want for the block has been requested by the client +// - the want for the block has not yet been sent out to a peer +// (because the live request queue is full) +func TestPendingBlockAdded(t *testing.T) { + ctx := context.Background() + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + bg := blocksutil.NewBlockGenerator() + sessionBroadcastWantCapacity := 4 + + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + + instance := ig.Instances(1)[0] + defer instance.Exchange.Close() + + oneSecCtx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + // Request enough blocks to exceed the session's broadcast want list + // capacity (by one block). 
The session will put the remaining block + // into the "tofetch" queue + blks := bg.Blocks(sessionBroadcastWantCapacity + 1) + ks := make([]cid.Cid, 0, len(blks)) + for _, b := range blks { + ks = append(ks, b.Cid()) + } + outch, err := instance.Exchange.GetBlocks(ctx, ks) + if err != nil { + t.Fatal(err) + } + + // Wait a little while to make sure the session has time to process the wants + time.Sleep(time.Millisecond * 20) + + // Simulate receiving a message which contains the block in the "tofetch" queue + lastBlock := blks[len(blks)-1] + bsMessage := bsmsg.New(true) + bsMessage.AddBlock(lastBlock) + unknownPeer := peer.ID("QmUHfvCQrzyR6vFXmeyCptfCWedfcmfa12V6UuziDtrw23") + instance.Exchange.ReceiveMessage(oneSecCtx, unknownPeer, bsMessage) + + // Make sure Bitswap adds the block to the output channel + blkrecvd, ok := <-outch + if !ok { + t.Fatal("timed out waiting for block") + } + if !blkrecvd.Cid().Equals(lastBlock.Cid()) { + t.Fatal("received wrong block") + } + + // Make sure Bitswap adds the block to the blockstore + blockInStore, err := instance.Blockstore().Has(lastBlock.Cid()) + if err != nil { + t.Fatal(err) + } + if !blockInStore { + t.Fatal("Block was not added to block store") + } +} + +func TestLargeSwarm(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + numInstances := 100 + numBlocks := 2 + if detectrace.WithRace() { + // when running with the race detector, 500 instances launches + // well over 8k goroutines. This hits a race detector limit. + numInstances = 20 + } else if travis.IsRunning() { + numInstances = 200 + } else { + t.Parallel() + } + PerformDistributionTest(t, numInstances, numBlocks) +} + +func TestLargeFile(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + + if !travis.IsRunning() { + t.Parallel() + } + + numInstances := 10 + numBlocks := 100 + PerformDistributionTest(t, numInstances, numBlocks) +} + +func TestLargeFileTwoPeers(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + numInstances := 2 + numBlocks := 100 + PerformDistributionTest(t, numInstances, numBlocks) +} + +func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { + ctx := context.Background() + if testing.Short() { + t.SkipNow() + } + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + bg := blocksutil.NewBlockGenerator() + + instances := ig.Instances(numInstances) + blocks := bg.Blocks(numBlocks) + + t.Log("Give the blocks to the first instance") + + var blkeys []cid.Cid + first := instances[0] + for _, b := range blocks { + blkeys = append(blkeys, b.Cid()) + err := first.Exchange.HasBlock(b) + if err != nil { + t.Fatal(err) + } + } + + t.Log("Distribute!") + + wg := sync.WaitGroup{} + errs := make(chan error) + + for _, inst := range instances[1:] { + wg.Add(1) + go func(inst testinstance.Instance) { + defer wg.Done() + outch, err := inst.Exchange.GetBlocks(ctx, blkeys) + if err != nil { + errs <- err + } + for range outch { + } + }(inst) + } + + go func() { + wg.Wait() + close(errs) + }() + + for err := range errs { + if err != nil { + t.Fatal(err) + } + } + + t.Log("Verify!") + + for _, inst := range instances { + for _, b := range blocks { + if _, err := inst.Blockstore().Get(b.Cid()); err != nil { + t.Fatal(err) + } + } + } +} + +// TODO simplify this test. get to the _essence_! 
+func TestSendToWantingPeer(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + bg := blocksutil.NewBlockGenerator() + + peers := ig.Instances(2) + peerA := peers[0] + peerB := peers[1] + + t.Logf("Session %v\n", peerA.Peer) + t.Logf("Session %v\n", peerB.Peer) + + waitTime := time.Second * 5 + + alpha := bg.Next() + // peerA requests and waits for block alpha + ctx, cancel := context.WithTimeout(context.Background(), waitTime) + defer cancel() + alphaPromise, err := peerA.Exchange.GetBlocks(ctx, []cid.Cid{alpha.Cid()}) + if err != nil { + t.Fatal(err) + } + + // peerB announces to the network that he has block alpha + err = peerB.Exchange.HasBlock(alpha) + if err != nil { + t.Fatal(err) + } + + // At some point, peerA should get alpha (or timeout) + blkrecvd, ok := <-alphaPromise + if !ok { + t.Fatal("context timed out and broke promise channel!") + } + + if !blkrecvd.Cid().Equals(alpha.Cid()) { + t.Fatal("Wrong block!") + } + +} + +func TestEmptyKey(t *testing.T) { + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + bs := ig.Instances(1)[0].Exchange + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + _, err := bs.GetBlock(ctx, cid.Cid{}) + if err != blockstore.ErrNotFound { + t.Error("empty str key should return ErrNotFound") + } +} + +func assertStat(t *testing.T, st *bitswap.Stat, sblks, rblks, sdata, rdata uint64) { + if sblks != st.BlocksSent { + t.Errorf("mismatch in blocks sent: %d vs %d", sblks, st.BlocksSent) + } + + if rblks != st.BlocksReceived { + t.Errorf("mismatch in blocks recvd: %d vs %d", rblks, st.BlocksReceived) + } + + if sdata != st.DataSent { + t.Errorf("mismatch in data sent: %d vs %d", sdata, st.DataSent) + } + + if rdata != st.DataReceived { + t.Errorf("mismatch in data recvd: %d vs %d", rdata, st.DataReceived) + } +} + +func TestBasicBitswap(t *testing.T) { + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + bg := blocksutil.NewBlockGenerator() + + t.Log("Test a one node trying to get one block from another") + + instances := ig.Instances(3) + blocks := bg.Blocks(1) + + // First peer has block + err := instances[0].Exchange.HasBlock(blocks[0]) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + // Second peer broadcasts want for block CID + // (Received by first and third peers) + blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) + if err != nil { + t.Fatal(err) + } + + // When second peer receives block, it should send out a cancel, so third + // peer should no longer keep second peer's want + if err = tu.WaitFor(ctx, func() error { + if len(instances[2].Exchange.WantlistForPeer(instances[1].Peer)) != 0 { + return fmt.Errorf("should have no items in other peers wantlist") + } + if len(instances[1].Exchange.GetWantlist()) != 0 { + return fmt.Errorf("shouldnt have anything in wantlist") + } + return nil + }); err != nil { + t.Fatal(err) + } + + st0, err := instances[0].Exchange.Stat() + if err != nil { + t.Fatal(err) + } + st1, err := instances[1].Exchange.Stat() + if err != nil { + t.Fatal(err) + } + + st2, err := instances[2].Exchange.Stat() 
+ if err != nil { + t.Fatal(err) + } + + t.Log("stat node 0") + assertStat(t, st0, 1, 0, uint64(len(blk.RawData())), 0) + t.Log("stat node 1") + assertStat(t, st1, 0, 1, 0, uint64(len(blk.RawData()))) + t.Log("stat node 2") + assertStat(t, st2, 0, 0, 0, 0) + + if !bytes.Equal(blk.RawData(), blocks[0].RawData()) { + t.Errorf("blocks aren't equal: expected %v, actual %v", blocks[0].RawData(), blk.RawData()) + } + + t.Log(blk) + for _, inst := range instances { + err := inst.Exchange.Close() + if err != nil { + t.Fatal(err) + } + } +} + +func TestDoubleGet(t *testing.T) { + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + bg := blocksutil.NewBlockGenerator() + + t.Log("Test a one node trying to get one block from another") + + instances := ig.Instances(2) + blocks := bg.Blocks(1) + + // NOTE: A race condition can happen here where these GetBlocks requests go + // through before the peers even get connected. This is okay, bitswap + // *should* be able to handle this. + ctx1, cancel1 := context.WithCancel(context.Background()) + blkch1, err := instances[1].Exchange.GetBlocks(ctx1, []cid.Cid{blocks[0].Cid()}) + if err != nil { + t.Fatal(err) + } + + ctx2, cancel2 := context.WithCancel(context.Background()) + defer cancel2() + + blkch2, err := instances[1].Exchange.GetBlocks(ctx2, []cid.Cid{blocks[0].Cid()}) + if err != nil { + t.Fatal(err) + } + + // ensure both requests make it into the wantlist at the same time + time.Sleep(time.Millisecond * 20) + cancel1() + + _, ok := <-blkch1 + if ok { + t.Fatal("expected channel to be closed") + } + + err = instances[0].Exchange.HasBlock(blocks[0]) + if err != nil { + t.Fatal(err) + } + + select { + case blk, ok := <-blkch2: + if !ok { + t.Fatal("expected to get the block here") + } + t.Log(blk) + case <-time.After(time.Second * 5): + p1wl := instances[0].Exchange.WantlistForPeer(instances[1].Peer) + if len(p1wl) != 1 { + t.Logf("wantlist view didnt have 1 item (had %d)", len(p1wl)) + } else if !p1wl[0].Equals(blocks[0].Cid()) { + t.Logf("had 1 item, it was wrong: %s %s", blocks[0].Cid(), p1wl[0]) + } else { + t.Log("had correct wantlist, somehow") + } + t.Fatal("timed out waiting on block") + } + + for _, inst := range instances { + err := inst.Exchange.Close() + if err != nil { + t.Fatal(err) + } + } +} + +func TestWantlistCleanup(t *testing.T) { + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + bg := blocksutil.NewBlockGenerator() + + instances := ig.Instances(2) + instance := instances[0] + bswap := instance.Exchange + blocks := bg.Blocks(20) + + var keys []cid.Cid + for _, b := range blocks { + keys = append(keys, b.Cid()) + } + + // Once context times out, key should be removed from wantlist + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) + defer cancel() + _, err := bswap.GetBlock(ctx, keys[0]) + if err != context.DeadlineExceeded { + t.Fatal("shouldnt have fetched any blocks") + } + + time.Sleep(time.Millisecond * 50) + + if len(bswap.GetWantHaves()) > 0 { + t.Fatal("should not have anyting in wantlist") + } + + // Once context times out, keys should be removed from wantlist + ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50) + defer cancel() + _, err = bswap.GetBlocks(ctx, keys[:10]) + if err != nil { + t.Fatal(err) + } + + <-ctx.Done() + time.Sleep(time.Millisecond * 
50) + + if len(bswap.GetWantHaves()) > 0 { + t.Fatal("should not have anyting in wantlist") + } + + // Send want for single block, with no timeout + _, err = bswap.GetBlocks(context.Background(), keys[:1]) + if err != nil { + t.Fatal(err) + } + + // Send want for 10 blocks + ctx, cancel = context.WithCancel(context.Background()) + _, err = bswap.GetBlocks(ctx, keys[10:]) + if err != nil { + t.Fatal(err) + } + + // Even after 50 milli-seconds we haven't explicitly cancelled anything + // and no timeouts have expired, so we should have 11 want-haves + time.Sleep(time.Millisecond * 50) + if len(bswap.GetWantHaves()) != 11 { + t.Fatal("should have 11 keys in wantlist") + } + + // Cancel the timeout for the request for 10 blocks. This should remove + // the want-haves + cancel() + + // Once the cancel is processed, we are left with the request for 1 block + time.Sleep(time.Millisecond * 50) + if !(len(bswap.GetWantHaves()) == 1 && bswap.GetWantHaves()[0] == keys[0]) { + t.Fatal("should only have keys[0] in wantlist") + } +} + +func assertLedgerMatch(ra, rb *decision.Receipt) error { + if ra.Sent != rb.Recv { + return fmt.Errorf("mismatch in ledgers (exchanged bytes): %d sent vs %d recvd", ra.Sent, rb.Recv) + } + + if ra.Recv != rb.Sent { + return fmt.Errorf("mismatch in ledgers (exchanged bytes): %d recvd vs %d sent", ra.Recv, rb.Sent) + } + + if ra.Exchanged != rb.Exchanged { + return fmt.Errorf("mismatch in ledgers (exchanged blocks): %d vs %d ", ra.Exchanged, rb.Exchanged) + } + + return nil +} + +func assertLedgerEqual(ra, rb *decision.Receipt) error { + if ra.Value != rb.Value { + return fmt.Errorf("mismatch in ledgers (value/debt ratio): %f vs %f ", ra.Value, rb.Value) + } + + if ra.Sent != rb.Sent { + return fmt.Errorf("mismatch in ledgers (sent bytes): %d vs %d", ra.Sent, rb.Sent) + } + + if ra.Recv != rb.Recv { + return fmt.Errorf("mismatch in ledgers (recvd bytes): %d vs %d", ra.Recv, rb.Recv) + } + + if ra.Exchanged != rb.Exchanged { + return fmt.Errorf("mismatch in ledgers (exchanged blocks): %d vs %d ", ra.Exchanged, rb.Exchanged) + } + + return nil +} + +func newReceipt(sent, recv, exchanged uint64) *decision.Receipt { + return &decision.Receipt{ + Peer: "test", + Value: float64(sent) / (1 + float64(recv)), + Sent: sent, + Recv: recv, + Exchanged: exchanged, + } +} + +func TestBitswapLedgerOneWay(t *testing.T) { + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + bg := blocksutil.NewBlockGenerator() + + t.Log("Test ledgers match when one peer sends block to another") + + instances := ig.Instances(2) + blocks := bg.Blocks(1) + err := instances[0].Exchange.HasBlock(blocks[0]) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) + if err != nil { + t.Fatal(err) + } + + ra := instances[0].Exchange.LedgerForPeer(instances[1].Peer) + rb := instances[1].Exchange.LedgerForPeer(instances[0].Peer) + + // compare peer ledger receipts + err = assertLedgerMatch(ra, rb) + if err != nil { + t.Fatal(err) + } + + // check that receipts have intended values + ratest := newReceipt(1, 0, 1) + err = assertLedgerEqual(ratest, ra) + if err != nil { + t.Fatal(err) + } + rbtest := newReceipt(0, 1, 1) + err = assertLedgerEqual(rbtest, rb) + if err != nil { + t.Fatal(err) + } + + t.Log(blk) + for _, inst := range instances { + err := inst.Exchange.Close() + 
if err != nil { + t.Fatal(err) + } + } +} + +func TestBitswapLedgerTwoWay(t *testing.T) { + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + bg := blocksutil.NewBlockGenerator() + + t.Log("Test ledgers match when two peers send one block to each other") + + instances := ig.Instances(2) + blocks := bg.Blocks(2) + err := instances[0].Exchange.HasBlock(blocks[0]) + if err != nil { + t.Fatal(err) + } + + err = instances[1].Exchange.HasBlock(blocks[1]) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + _, err = instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) + if err != nil { + t.Fatal(err) + } + + ctx, cancel = context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + blk, err := instances[0].Exchange.GetBlock(ctx, blocks[1].Cid()) + if err != nil { + t.Fatal(err) + } + + ra := instances[0].Exchange.LedgerForPeer(instances[1].Peer) + rb := instances[1].Exchange.LedgerForPeer(instances[0].Peer) + + // compare peer ledger receipts + err = assertLedgerMatch(ra, rb) + if err != nil { + t.Fatal(err) + } + + // check that receipts have intended values + rtest := newReceipt(1, 1, 2) + err = assertLedgerEqual(rtest, ra) + if err != nil { + t.Fatal(err) + } + + err = assertLedgerEqual(rtest, rb) + if err != nil { + t.Fatal(err) + } + + t.Log(blk) + for _, inst := range instances { + err := inst.Exchange.Close() + if err != nil { + t.Fatal(err) + } + } +} + +type testingScoreLedger struct { + scorePeer deciface.ScorePeerFunc + started chan struct{} + closed chan struct{} +} + +func newTestingScoreLedger() *testingScoreLedger { + return &testingScoreLedger{ + nil, + make(chan struct{}), + make(chan struct{}), + } +} + +func (tsl *testingScoreLedger) GetReceipt(p peer.ID) *deciface.Receipt { + return nil +} +func (tsl *testingScoreLedger) AddToSentBytes(p peer.ID, n int) {} +func (tsl *testingScoreLedger) AddToReceivedBytes(p peer.ID, n int) {} +func (tsl *testingScoreLedger) PeerConnected(p peer.ID) {} +func (tsl *testingScoreLedger) PeerDisconnected(p peer.ID) {} +func (tsl *testingScoreLedger) Start(scorePeer deciface.ScorePeerFunc) { + tsl.scorePeer = scorePeer + close(tsl.started) +} +func (tsl *testingScoreLedger) Stop() { + close(tsl.closed) +} + +// Tests start and stop of a custom decision logic +func TestWithScoreLedger(t *testing.T) { + tsl := newTestingScoreLedger() + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + bsOpts := []bitswap.Option{bitswap.WithScoreLedger(tsl)} + ig := testinstance.NewTestInstanceGenerator(net, nil, bsOpts) + defer ig.Close() + i := ig.Next() + defer i.Exchange.Close() + + select { + case <-tsl.started: + if tsl.scorePeer == nil { + t.Fatal("Expected the score function to be initialized") + } + case <-time.After(time.Second * 5): + t.Fatal("Expected the score ledger to be started within 5s") + } + + i.Exchange.Close() + select { + case <-tsl.closed: + case <-time.After(time.Second * 5): + t.Fatal("Expected the score ledger to be closed within 5s") + } +} + +type logItem struct { + dir byte + pid peer.ID + msg bsmsg.BitSwapMessage +} +type mockWireTap struct { + mu sync.Mutex + log []logItem +} + +func (m *mockWireTap) MessageReceived(p peer.ID, msg bsmsg.BitSwapMessage) { + m.mu.Lock() + defer m.mu.Unlock() + m.log = append(m.log, logItem{'r', p, msg}) +} +func (m *mockWireTap) MessageSent(p peer.ID, msg 
bsmsg.BitSwapMessage) { + m.mu.Lock() + defer m.mu.Unlock() + m.log = append(m.log, logItem{'s', p, msg}) +} + +func (m *mockWireTap) getLog() []logItem { + m.mu.Lock() + defer m.mu.Unlock() + return m.log[:len(m.log):len(m.log)] +} + +func TestWireTap(t *testing.T) { + net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + defer ig.Close() + bg := blocksutil.NewBlockGenerator() + + instances := ig.Instances(3) + blocks := bg.Blocks(2) + + // Install WireTap + wiretap := new(mockWireTap) + bitswap.EnableWireTap(wiretap)(instances[0].Exchange) + + // First peer has block + err := instances[0].Exchange.HasBlock(blocks[0]) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + // Second peer broadcasts want for block CID + // (Received by first and third peers) + _, err = instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) + if err != nil { + t.Fatal(err) + } + + // When second peer receives block, it should send out a cancel, so third + // peer should no longer keep second peer's want + if err = tu.WaitFor(ctx, func() error { + if len(instances[2].Exchange.WantlistForPeer(instances[1].Peer)) != 0 { + return fmt.Errorf("should have no items in other peers wantlist") + } + if len(instances[1].Exchange.GetWantlist()) != 0 { + return fmt.Errorf("shouldnt have anything in wantlist") + } + return nil + }); err != nil { + t.Fatal(err) + } + + log := wiretap.getLog() + + // After communication, 3 messages should be logged via WireTap + if l := len(log); l != 3 { + t.Fatal("expected 3 items logged via WireTap, found", l) + } + + // Received: 'Have' + if log[0].dir != 'r' { + t.Error("expected message to be received") + } + if log[0].pid != instances[1].Peer { + t.Error("expected peer", instances[1].Peer, ", found", log[0].pid) + } + if l := len(log[0].msg.Wantlist()); l != 1 { + t.Fatal("expected 1 entry in Wantlist, found", l) + } + if log[0].msg.Wantlist()[0].WantType != pb.Message_Wantlist_Have { + t.Error("expected WantType equal to 'Have', found 'Block'") + } + + // Sent: Block + if log[1].dir != 's' { + t.Error("expected message to be sent") + } + if log[1].pid != instances[1].Peer { + t.Error("expected peer", instances[1].Peer, ", found", log[1].pid) + } + if l := len(log[1].msg.Blocks()); l != 1 { + t.Fatal("expected 1 entry in Blocks, found", l) + } + if log[1].msg.Blocks()[0].Cid() != blocks[0].Cid() { + t.Error("wrong block Cid") + } + + // Received: 'Cancel' + if log[2].dir != 'r' { + t.Error("expected message to be received") + } + if log[2].pid != instances[1].Peer { + t.Error("expected peer", instances[1].Peer, ", found", log[2].pid) + } + if l := len(log[2].msg.Wantlist()); l != 1 { + t.Fatal("expected 1 entry in Wantlist, found", l) + } + if log[2].msg.Wantlist()[0].WantType != pb.Message_Wantlist_Block { + t.Error("expected WantType equal to 'Block', found 'Have'") + } + if log[2].msg.Wantlist()[0].Cancel != true { + t.Error("expected entry with Cancel set to 'true'") + } + + // After disabling WireTap, no new messages are logged + bitswap.DisableWireTap()(instances[0].Exchange) + + err = instances[0].Exchange.HasBlock(blocks[1]) + if err != nil { + t.Fatal(err) + } + _, err = instances[1].Exchange.GetBlock(ctx, blocks[1].Cid()) + if err != nil { + t.Fatal(err) + } + if err = tu.WaitFor(ctx, func() error { + if len(instances[1].Exchange.GetWantlist()) != 0 { + return fmt.Errorf("shouldnt have anything in wantlist") + } 
+ return nil + }); err != nil { + t.Fatal(err) + } + + log = wiretap.getLog() + + if l := len(log); l != 3 { + t.Fatal("expected 3 items logged via WireTap, found", l) + } + + for _, inst := range instances { + err := inst.Exchange.Close() + if err != nil { + t.Fatal(err) + } + } +} diff --git a/bitswap_with_sessions_test.go b/bitswap_with_sessions_test.go new file mode 100644 index 0000000000000000000000000000000000000000..587e7a6a183242afcef68c63e12fa0565c3608ac --- /dev/null +++ b/bitswap_with_sessions_test.go @@ -0,0 +1,474 @@ +package bitswap_test + +import ( + "context" + "fmt" + "testing" + "time" + + bitswap "gitlab.dms3.io/dms3/go-bitswap" + bssession "gitlab.dms3.io/dms3/go-bitswap/internal/session" + testinstance "gitlab.dms3.io/dms3/go-bitswap/testinstance" + tn "gitlab.dms3.io/dms3/go-bitswap/testnet" + blocks "gitlab.dms3.io/dms3/go-block-format" + cid "gitlab.dms3.io/dms3/go-cid" + blocksutil "gitlab.dms3.io/dms3/go-dms3-blocksutil" + delay "gitlab.dms3.io/dms3/go-dms3-delay" + mockrouting "gitlab.dms3.io/dms3/go-dms3-routing/mock" + tu "gitlab.dms3.io/p2p/go-p2p-testing/etc" +) + +func TestBasicSessions(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + block := bgen.Next() + inst := ig.Instances(2) + + a := inst[0] + b := inst[1] + + // Add a block to Peer B + if err := b.Blockstore().Put(block); err != nil { + t.Fatal(err) + } + + // Create a session on Peer A + sesa := a.Exchange.NewSession(ctx) + + // Get the block + blkout, err := sesa.GetBlock(ctx, block.Cid()) + if err != nil { + t.Fatal(err) + } + + if !blkout.Cid().Equals(block.Cid()) { + t.Fatal("got wrong block") + } +} + +func assertBlockLists(got, exp []blocks.Block) error { + if len(got) != len(exp) { + return fmt.Errorf("got wrong number of blocks, %d != %d", len(got), len(exp)) + } + + h := cid.NewSet() + for _, b := range got { + h.Add(b.Cid()) + } + for _, b := range exp { + if !h.Has(b.Cid()) { + return fmt.Errorf("didnt have: %s", b.Cid()) + } + } + return nil +} + +func TestSessionBetweenPeers(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + inst := ig.Instances(10) + + // Add 101 blocks to Peer A + blks := bgen.Blocks(101) + if err := inst[0].Blockstore().PutMany(blks); err != nil { + t.Fatal(err) + } + + var cids []cid.Cid + for _, blk := range blks { + cids = append(cids, blk.Cid()) + } + + // Create a session on Peer B + ses := inst[1].Exchange.NewSession(ctx) + if _, err := ses.GetBlock(ctx, cids[0]); err != nil { + t.Fatal(err) + } + blks = blks[1:] + cids = cids[1:] + + // Fetch blocks with the session, 10 at a time + for i := 0; i < 10; i++ { + ch, err := ses.GetBlocks(ctx, cids[i*10:(i+1)*10]) + if err != nil { + t.Fatal(err) + } + + var got []blocks.Block + for b := range ch { + got = append(got, b) + } + if err := assertBlockLists(got, blks[i*10:(i+1)*10]); err != nil { + t.Fatal(err) + } + } + + // Uninvolved nodes should receive + // - initial broadcast want-have of root block + // - CANCEL (when Peer A receives the root block from Peer B) + for _, is := range inst[2:] { + stat, err := is.Exchange.Stat() + if err != nil { + t.Fatal(err) + } + if 
stat.MessagesReceived > 2 { + t.Fatal("uninvolved nodes should only receive two messages", stat.MessagesReceived) + } + } +} + +func TestSessionSplitFetch(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + inst := ig.Instances(11) + + // Add 10 distinct blocks to each of 10 peers + blks := bgen.Blocks(100) + for i := 0; i < 10; i++ { + if err := inst[i].Blockstore().PutMany(blks[i*10 : (i+1)*10]); err != nil { + t.Fatal(err) + } + } + + var cids []cid.Cid + for _, blk := range blks { + cids = append(cids, blk.Cid()) + } + + // Create a session on the remaining peer and fetch all the blocks 10 at a time + ses := inst[10].Exchange.NewSession(ctx).(*bssession.Session) + ses.SetBaseTickDelay(time.Millisecond * 10) + + for i := 0; i < 10; i++ { + ch, err := ses.GetBlocks(ctx, cids[i*10:(i+1)*10]) + if err != nil { + t.Fatal(err) + } + + var got []blocks.Block + for b := range ch { + got = append(got, b) + } + if err := assertBlockLists(got, blks[i*10:(i+1)*10]); err != nil { + t.Fatal(err) + } + } +} + +func TestFetchNotConnected(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{bitswap.ProviderSearchDelay(10 * time.Millisecond)}) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + other := ig.Next() + + // Provide 10 blocks on Peer A + blks := bgen.Blocks(10) + for _, block := range blks { + if err := other.Exchange.HasBlock(block); err != nil { + t.Fatal(err) + } + } + + var cids []cid.Cid + for _, blk := range blks { + cids = append(cids, blk.Cid()) + } + + // Request blocks with Peer B + // Note: Peer A and Peer B are not initially connected, so this tests + // that Peer B will search for and find Peer A + thisNode := ig.Next() + ses := thisNode.Exchange.NewSession(ctx).(*bssession.Session) + ses.SetBaseTickDelay(time.Millisecond * 10) + + ch, err := ses.GetBlocks(ctx, cids) + if err != nil { + t.Fatal(err) + } + + var got []blocks.Block + for b := range ch { + got = append(got, b) + } + if err := assertBlockLists(got, blks); err != nil { + t.Fatal(err) + } +} + +func TestFetchAfterDisconnect(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{ + bitswap.ProviderSearchDelay(10 * time.Millisecond), + bitswap.RebroadcastDelay(delay.Fixed(15 * time.Millisecond)), + }) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + inst := ig.Instances(2) + peerA := inst[0] + peerB := inst[1] + + // Provide 5 blocks on Peer A + blks := bgen.Blocks(10) + var cids []cid.Cid + for _, blk := range blks { + cids = append(cids, blk.Cid()) + } + + firstBlks := blks[:5] + for _, block := range firstBlks { + if err := peerA.Exchange.HasBlock(block); err != nil { + t.Fatal(err) + } + } + + // Request all blocks with Peer B + ses := peerB.Exchange.NewSession(ctx).(*bssession.Session) + ses.SetBaseTickDelay(time.Millisecond * 10) + + ch, err := ses.GetBlocks(ctx, cids) + if err != nil { + t.Fatal(err) + } + + // Should get first 5 blocks + var got []blocks.Block + for i := 0; i < 5; i++ { + b := <-ch + got = append(got, b) + } + + if err := assertBlockLists(got, blks[:5]); err != 
nil { + t.Fatal(err) + } + + // Break connection + err = peerA.Adapter.DisconnectFrom(ctx, peerB.Peer) + if err != nil { + t.Fatal(err) + } + + time.Sleep(20 * time.Millisecond) + + // Provide remaining blocks + lastBlks := blks[5:] + for _, block := range lastBlks { + if err := peerA.Exchange.HasBlock(block); err != nil { + t.Fatal(err) + } + } + + // Peer B should call FindProviders() and find Peer A + + // Should get last 5 blocks + for i := 0; i < 5; i++ { + select { + case b := <-ch: + got = append(got, b) + case <-ctx.Done(): + } + } + + if err := assertBlockLists(got, blks); err != nil { + t.Fatal(err) + } +} + +func TestInterestCacheOverflow(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + blks := bgen.Blocks(2049) + inst := ig.Instances(2) + + a := inst[0] + b := inst[1] + + ses := a.Exchange.NewSession(ctx) + zeroch, err := ses.GetBlocks(ctx, []cid.Cid{blks[0].Cid()}) + if err != nil { + t.Fatal(err) + } + + var restcids []cid.Cid + for _, blk := range blks[1:] { + restcids = append(restcids, blk.Cid()) + } + + restch, err := ses.GetBlocks(ctx, restcids) + if err != nil { + t.Fatal(err) + } + + // wait to ensure that all the above cids were added to the sessions cache + time.Sleep(time.Millisecond * 50) + + if err := b.Exchange.HasBlock(blks[0]); err != nil { + t.Fatal(err) + } + + select { + case blk, ok := <-zeroch: + if ok && blk.Cid().Equals(blks[0].Cid()) { + // success! + } else { + t.Fatal("failed to get the block") + } + case <-restch: + t.Fatal("should not get anything on restch") + case <-time.After(time.Second * 5): + t.Fatal("timed out waiting for block") + } +} + +func TestPutAfterSessionCacheEvict(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + blks := bgen.Blocks(2500) + inst := ig.Instances(1) + + a := inst[0] + + ses := a.Exchange.NewSession(ctx) + + var allcids []cid.Cid + for _, blk := range blks[1:] { + allcids = append(allcids, blk.Cid()) + } + + blkch, err := ses.GetBlocks(ctx, allcids) + if err != nil { + t.Fatal(err) + } + + // wait to ensure that all the above cids were added to the sessions cache + time.Sleep(time.Millisecond * 50) + + if err := a.Exchange.HasBlock(blks[17]); err != nil { + t.Fatal(err) + } + + select { + case <-blkch: + case <-time.After(time.Millisecond * 50): + t.Fatal("timed out waiting for block") + } +} + +func TestMultipleSessions(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + blk := bgen.Blocks(1)[0] + inst := ig.Instances(2) + + a := inst[0] + b := inst[1] + + ctx1, cancel1 := context.WithCancel(ctx) + ses := a.Exchange.NewSession(ctx1) + + blkch, err := ses.GetBlocks(ctx, []cid.Cid{blk.Cid()}) + if err != nil { + t.Fatal(err) + } + cancel1() + + ses2 := a.Exchange.NewSession(ctx) + blkch2, err := ses2.GetBlocks(ctx, []cid.Cid{blk.Cid()}) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 10) + if err := b.Exchange.HasBlock(blk); err != nil { + t.Fatal(err) + } + + select { + case <-blkch2: + case 
<-time.After(time.Second * 20): + t.Fatal("bad juju") + } + _ = blkch +} + +func TestWantlistClearsOnCancel(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + vnet := getVirtualNetwork() + ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + defer ig.Close() + bgen := blocksutil.NewBlockGenerator() + + blks := bgen.Blocks(10) + var cids []cid.Cid + for _, blk := range blks { + cids = append(cids, blk.Cid()) + } + + inst := ig.Instances(1) + + a := inst[0] + + ctx1, cancel1 := context.WithCancel(ctx) + ses := a.Exchange.NewSession(ctx1) + + _, err := ses.GetBlocks(ctx, cids) + if err != nil { + t.Fatal(err) + } + cancel1() + + if err := tu.WaitFor(ctx, func() error { + if len(a.Exchange.GetWantlist()) > 0 { + return fmt.Errorf("expected empty wantlist") + } + return nil + }); err != nil { + t.Fatal(err) + } +} diff --git a/decision/decision.go b/decision/decision.go new file mode 100644 index 0000000000000000000000000000000000000000..cf17c27b9663f36b4cfcce00faeedf3de297807e --- /dev/null +++ b/decision/decision.go @@ -0,0 +1,12 @@ +package decision + +import intdec "gitlab.dms3.io/dms3/go-bitswap/internal/decision" + +// Expose Receipt externally +type Receipt = intdec.Receipt + +// Expose ScoreLedger externally +type ScoreLedger = intdec.ScoreLedger + +// Expose ScorePeerFunc externally +type ScorePeerFunc = intdec.ScorePeerFunc diff --git a/docs/go-bitswap.png b/docs/go-bitswap.png new file mode 100644 index 0000000000000000000000000000000000000000..805bf6562a822c8cd68ed310ff49f07e4be45ba2 Binary files /dev/null and b/docs/go-bitswap.png differ diff --git a/docs/go-bitswap.puml b/docs/go-bitswap.puml new file mode 100644 index 0000000000000000000000000000000000000000..af9134d7e5a6e49e2fea51c4d919a8198ab6c83b --- /dev/null +++ b/docs/go-bitswap.puml @@ -0,0 +1,49 @@ +@startuml Bitswap Components + +node "Top Level Interface" { + [Bitswap] +} + +node "Sending Blocks" { + [Bitswap] --* [Engine] + [Engine] -left-* [Ledger] + [Engine] -right-* [PeerTaskQueue] + [Engine] --> [TaskWorker (workers.go)] +} + +node "Providing" { + [Bitswap] --* [Provide Collector (workers.go)] + [Provide Collector (workers.go)] --* [Provide Worker (workers.go)] +} + +node "Finding Providers" { + [Bitswap] --* [ProvideQueryManager] +} + +node "Sessions (smart requests)" { + [Bitswap] --* [SessionManager] + [SessionManager] --> [SessionInterestManager] + [SessionManager] --o [Session] + [SessionManager] --> [BlockPresenceManager] + [Session] --* [sessionWantSender] + [Session] --* [SessionPeerManager] + [Session] --> [ProvideQueryManager] + [Session] --* [sessionWants] + [Session] --> [SessionInterestManager] + [sessionWantSender] --> [BlockPresenceManager] +} + +node "Requesting Blocks" { + [SessionManager] --> [PeerManager] + [sessionWantSender] --> [PeerManager] + [PeerManager] --* [MessageQueue] +} + +node "Network" { + [BitSwapNetwork] + [MessageQueue] --> [BitSwapNetwork] + [ProvideQueryManager] --> [BitSwapNetwork] + [TaskWorker (workers.go)] --> [BitSwapNetwork] + [Provide Worker (workers.go)] --> [BitSwapNetwork] +} +@enduml \ No newline at end of file diff --git a/docs/how-bitswap-works.md b/docs/how-bitswap-works.md new file mode 100644 index 0000000000000000000000000000000000000000..303b05763ab757ff76b717bd0ca893956bd2f938 --- /dev/null +++ b/docs/how-bitswap-works.md @@ -0,0 +1,143 @@ +How Bitswap Works +================= + +When a client requests blocks, Bitswap sends the CID of those blocks to its peers as "wants". 
When Bitswap receives a "want" from a peer, it responds with the corresponding block.
+
+### Requesting Blocks
+
+#### Sessions
+
+Bitswap Sessions allow the client to make related requests to the same group of peers. For example, requests to fetch all the blocks in a file would typically be made with a single session.
+
+#### Discovery
+
+To discover which peers have a block, Bitswap broadcasts a `want-have` message to all peers it is connected to, asking if they have the block.
+
+Any peers that have the block respond with a `HAVE` message. They are added to the Session.
+
+If no connected peers have the block, Bitswap queries the DHT to find peers that have the block.
+
+### Wants
+
+When the client requests a block, Bitswap sends a `want-have` message with the block CID to all peers in the Session to ask who has the block.
+
+Bitswap simultaneously sends a `want-block` message to one of the peers in the Session to request the block. If the peer does not have the block, it responds with a `DONT_HAVE` message. In that case, Bitswap selects another peer and sends the `want-block` to that peer.
+
+If no peers have the block, Bitswap broadcasts a `want-have` to all connected peers, and queries the DHT to find peers that have the block.
+
+#### Peer Selection
+
+Bitswap uses a probabilistic algorithm to select which peer to send `want-block` to, favouring peers that
+- sent `HAVE` for the block
+- were discovered as providers of the block in the DHT
+- were first to send blocks to previous session requests
+
+The selection algorithm includes some randomness so as to allow peers that are discovered later, but are more responsive, to rise in the ranking.
+
+#### Periodic Search Widening
+
+Periodically, the Bitswap Session selects a random CID from the list of "pending wants" (wants that have been sent but for which no block has been received). Bitswap broadcasts a `want-have` to all connected peers and queries the DHT for the CID.
+
+### Serving Blocks
+
+#### Processing Requests
+
+When Bitswap receives a `want-have`, it checks if the block is in the local blockstore.
+
+If the block is in the local blockstore, Bitswap responds with `HAVE`. If the block is small, Bitswap sends the block itself instead of `HAVE`.
+
+If the block is not in the local blockstore, Bitswap checks the `send-dont-have` flag on the request. If `send-dont-have` is true, Bitswap sends `DONT_HAVE`. Otherwise it does not respond.
+
+#### Processing Incoming Blocks
+
+When Bitswap receives a block, it checks to see if any peers sent `want-have` or `want-block` for the block. If so, it sends `HAVE` or the block itself to those peers.
+
+#### Priority
+
+Bitswap keeps requests from each peer in separate queues, ordered by the priority specified in the request message.
+
+To select which peer to send the next response to, Bitswap chooses the peer with the least amount of data in its send queue. That way it will tend to "keep peers busy" by always keeping some data in each peer's send queue.
+
+
+Implementation
+==============
+
+![Bitswap Components](./go-bitswap.png)
+
+### Bitswap
+
+The Bitswap class receives incoming messages and implements the Exchange API.
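+
+For orientation, here is a minimal sketch of the client side of that API as exercised by the tests in this change (`NewSession()`, `GetBlock()`, `GetBlocks()`). The `fetch` helper, its arguments and the 30-second timeout are illustrative only, not part of the library:
+
+```go
+package example
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	bitswap "gitlab.dms3.io/dms3/go-bitswap"
+	cid "gitlab.dms3.io/dms3/go-cid"
+)
+
+// fetch retrieves a root block and a set of related blocks through a single
+// session, so that peers discovered for the first block are reused for the rest.
+func fetch(bs *bitswap.Bitswap, root cid.Cid, children []cid.Cid) error {
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
+	// NewSession returns an exchange.Fetcher scoped to this group of requests.
+	session := bs.NewSession(ctx)
+
+	// Fetch a single block synchronously...
+	blk, err := session.GetBlock(ctx, root)
+	if err != nil {
+		return err
+	}
+	fmt.Println("got root block", blk.Cid())
+
+	// ...then stream the remaining blocks as they arrive.
+	ch, err := session.GetBlocks(ctx, children)
+	if err != nil {
+		return err
+	}
+	for b := range ch {
+		fmt.Println("got", b.Cid())
+	}
+	return nil
+}
+```
+
+Making related requests through one session, rather than calling `GetBlock()` on the top-level exchange for each block, lets the session reuse the peers it has already discovered, as described under "Sessions" above.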
+
+When a message is received, Bitswap
+- Records some statistics about the message
+- Informs the Engine of any new wants
+  So that the Engine can send responses to the wants
+- Informs the Engine of any received blocks
+  So that the Engine can send the received blocks to any peers that want them
+- Informs the SessionManager of received blocks, HAVEs and DONT_HAVEs
+  So that the SessionManager can inform interested sessions
+
+When the client makes an API call, Bitswap creates a new Session and calls the corresponding method (e.g. `GetBlocks()`).
+
+### Sending Blocks
+
+When the Engine is informed of new wants, it
+- Adds the wants to the Ledger (peer A wants block with CID Qmhash...)
+- Checks the blockstore for the corresponding blocks, and adds a task to the PeerTaskQueue
+  - If the blockstore does not have a wanted block, adds a `DONT_HAVE` task
+  - If the blockstore has the block
+    - for a `want-have` adds a `HAVE` task
+    - for a `want-block` adds a `block` task
+
+When the Engine is informed of new blocks, it checks the Ledger to see if any peers want information about those blocks.
+- For each block
+  - For each peer that sent a `want-have` for the corresponding block
+    Adds a `HAVE` task to the PeerTaskQueue
+  - For each peer that sent a `want-block` for the corresponding block
+    Adds a `block` task to the PeerTaskQueue
+
+The Engine periodically pops tasks off the PeerTaskQueue, and creates a message with `blocks`, `HAVEs` and `DONT_HAVEs`.
+The PeerTaskQueue prioritizes tasks such that the peers with the least amount of data in their send queue are highest priority, so as to "keep peers busy".
+
+### Requesting Blocks
+
+When the SessionManager is informed of a new message, it
+- informs the BlockPresenceManager
+  The BlockPresenceManager keeps track of which peers have sent HAVEs and DONT_HAVEs for each block
+- informs the Sessions that are interested in the received blocks and wants
+- informs the PeerManager of received blocks
+  The PeerManager checks if any wants were sent to a peer for the received blocks. If so, it sends a `CANCEL` message to those peers.
+
+### Sessions
+
+The Session starts in "discovery" mode. This means it doesn't have any peers yet, and needs to discover which peers have the blocks it wants.
+
+When the client initially requests blocks from a Session, the Session
+- informs the SessionInterestManager that it is interested in the want
+- informs the sessionWantManager of the want
+- tells the PeerManager to broadcast a `want-have` to all connected peers so as to discover which peers have the block
+- queries the ProviderQueryManager to discover which peers have the block
+
+When the session receives a message with `HAVE` or a `block`, it informs the SessionPeerManager. The SessionPeerManager keeps track of all peers in the session.
+When the session receives a message with a `block`, it informs the SessionInterestManager.
+
+Once the session has peers, it is no longer in "discovery" mode. When the client requests subsequent blocks, the Session informs the sessionWantSender. The sessionWantSender tells the PeerManager to send `want-have` and `want-block` to peers in the session.
+
+For each block that the Session wants, the sessionWantSender decides which peer is most likely to have a block by checking with the BlockPresenceManager which peers have sent a `HAVE` for the block.
If no peers or multiple peers have sent `HAVE`, a peer is chosen probabilistically according to which how many times each peer was first to send a block in response to previous wants requested by the Session. The sessionWantSender sends a single "optimistic" `want-block` to the chosen peer, and sends `want-have` to all other peers in the Session. +When a peer responds with `DONT_HAVE`, the Session sends `want-block` to the next best peer, and so on until the block is received. + +### PeerManager + +The PeerManager creates a MessageQueue for each peer that connects to Bitswap. It remembers which `want-have` / `want-block` has been sent to each peer, and directs any new wants to the correct peer. +The MessageQueue groups together wants into a message, and sends the message to the peer. It monitors for timeouts and simulates a `DONT_HAVE` response if a peer takes too long to respond. + +### Finding Providers + +When bitswap can't find a connected peer who already has the block it wants, it falls back to querying a content routing system (a DHT in IPFS's case) to try to locate a peer with the block. + +Bitswap routes these requests through the ProviderQueryManager system, which rate-limits these requests and also deduplicates in-process requests. + +### Providing + +As a bitswap client receives blocks, by default it announces them on the provided content routing system (again, a DHT in most cases). This behaviour can be disabled by passing `bitswap.ProvideEnabled(false)` as a parameter when initializing Bitswap. IPFS currently has its own experimental provider system ([go-ipfs-provider](https://github.com/ipfs/go-ipfs-provider)) which will eventually replace Bitswap's system entirely. + diff --git a/go.mod b/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..5f4268a943eb84cb240bc8513f3aaf99b1ea7bef --- /dev/null +++ b/go.mod @@ -0,0 +1,33 @@ +module gitlab.dms3.io/dms3/go-bitswap + +require ( + github.com/cskr/pubsub v1.0.2 + github.com/gogo/protobuf v1.3.2 + github.com/google/uuid v1.1.2 + github.com/jbenet/goprocess v0.1.4 + github.com/multiformats/go-multistream v0.2.1 + gitlab.dms3.io/dms3/go-block-format v0.0.3 + gitlab.dms3.io/dms3/go-cid v0.0.3 + gitlab.dms3.io/dms3/go-datastore v0.0.2 + gitlab.dms3.io/dms3/go-detect-race v0.0.3 + gitlab.dms3.io/dms3/go-dms3-blockstore v0.0.2 + gitlab.dms3.io/dms3/go-dms3-blocksutil v0.0.2 + gitlab.dms3.io/dms3/go-dms3-delay v0.0.2 + gitlab.dms3.io/dms3/go-dms3-exchange-interface v0.0.2 + gitlab.dms3.io/dms3/go-dms3-routing v0.0.2 + gitlab.dms3.io/dms3/go-dms3-util v0.0.2 + gitlab.dms3.io/dms3/go-log v0.0.2 + gitlab.dms3.io/dms3/go-metrics-interface v0.0.2 + gitlab.dms3.io/dms3/go-peertaskqueue v0.0.1 + gitlab.dms3.io/mf/go-multiaddr v0.0.2 + gitlab.dms3.io/p2p/go-buffer-pool v0.0.1 + gitlab.dms3.io/p2p/go-msgio v0.0.1 + gitlab.dms3.io/p2p/go-p2p v0.0.4 + gitlab.dms3.io/p2p/go-p2p-core v0.0.2 + gitlab.dms3.io/p2p/go-p2p-loggables v0.0.2 + gitlab.dms3.io/p2p/go-p2p-netutil v0.0.2 + gitlab.dms3.io/p2p/go-p2p-testing v0.0.2 + go.uber.org/zap v1.16.0 +) + +go 1.15 diff --git a/go.sum b/go.sum new file mode 100644 index 0000000000000000000000000000000000000000..33eab410a2bdfc80adff6b4624dd1ae0397757b5 --- /dev/null +++ b/go.sum @@ -0,0 +1,905 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod 
h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= +github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod 
h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= +github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018 h1:6xT9KW8zLC5IlbaIF5Q7JNieBoACT7iW0YTxQHR0in0= +github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= +github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= +github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod 
h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= 
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= +github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket 
v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 
+github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/cpuid/v2 v2.0.4 h1:g0I61F2K2DjRHz1cnxlkNSBIaePVoJIjjnHui8QHbiw= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/koron/go-ssdp 
v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ= +github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lucas-clemente/quic-go v0.21.1 h1:uuhCcu885TE9u/piPYMChI/yqA1lXfaLUEx8uCMxf8w= +github.com/lucas-clemente/quic-go v0.21.1/go.mod h1:U9kFi5LKbNIlU30dkuM9vxmTxWq4Bvzee/MjBI+07UA= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= +github.com/marten-seemann/qtls-go1-15 v0.1.4 h1:RehYMOyRW8hPVEja1KBVsFVNSm35Jj9Mvs5yNoZZ28A= +github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-16 v0.1.3 h1:XEZ1xGorVy9u+lJq+WXNE+hiqRYLNvJGYmwfwKQN2gU= +github.com/marten-seemann/qtls-go1-16 v0.1.3/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= +github.com/marten-seemann/qtls-go1-17 v0.1.0-beta.1.2 h1:SficYjyOthSrliKI+EaFuXS6HqSsX3dkY9AqxAAjBjw= +github.com/marten-seemann/qtls-go1-17 v0.1.0-beta.1.2/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod 
h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= 
+github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= +github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= +github.com/multiformats/go-multiaddr v0.3.1 h1:1bxa+W7j9wZKTZREySx1vPMs2TqrYWjVZ7zE6/XLG1I= +github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= +github.com/multiformats/go-multiaddr-net v0.2.0 h1:MSXRGN0mFymt6B1yo/6BPnIRpLPEnKgQNvVfCX5VDJk= +github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I= +github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.15 h1:hWOPdrNqDjwHDx82vsYGSDZNyktOJJ2dzZJzFkOV1jM= +github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= +github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= +github.com/multiformats/go-multistream v0.2.1 h1:R5exp4cKvGlePuxg/bn4cnV53K4DxCe+uldxs7QzfrE= +github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= +github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo 
v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= 
+github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.10.0 h1:/o0BDeWzLWXNZ+4q5gXltUvaMpJqckTa+jTNoB+z4cg= +github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.18.0 h1:WCVKW7aL6LEe1uryfI9dnEc2ZqNB1Fn0ok930v0iL1Y= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid 
v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod 
h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tmc/grpc-websocket-proxy 
v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +gitlab.dms3.io/dms3/bbloom v0.0.2 h1:QUy22f3j+xsQm4osUxvE5xoJ2N9gW7qU59RQx9CKcyY= +gitlab.dms3.io/dms3/bbloom v0.0.2/go.mod h1:LcQmLQuryIw3kDzD+bcbojAojWtYni30BeSJ9wuw8qg= +gitlab.dms3.io/dms3/go-block-format v0.0.3 h1:3gPZUNu5UavbZxbMNd4NDTLG53O7MbmYSwntQwZGKxI= +gitlab.dms3.io/dms3/go-block-format v0.0.3/go.mod h1:IbZAFf3fFJa8YW98gaSg/iO9GKtsgVs6+N8OVHmFw1I= +gitlab.dms3.io/dms3/go-cid v0.0.3 h1:5qZ1sl1Bi26naLz7Vsc8fjWcJKX8bR6njt3WPCteXac= +gitlab.dms3.io/dms3/go-cid v0.0.3/go.mod h1:qT/Q1NZD31UnWQ+rwsQgzGrrsQhpq7dYSlXf7ulDgtk= +gitlab.dms3.io/dms3/go-datastore v0.0.2 h1:cLsmDEiDz+onFnbPhyLYuPECw+tJEnvpil958ps0WHw= +gitlab.dms3.io/dms3/go-datastore v0.0.2/go.mod h1:IOybd0feVxRiGt5idG/FzjdeK6itS0uaMgD42CmANFo= +gitlab.dms3.io/dms3/go-detect-race v0.0.3 h1:z174ts1AmdR+mTgGbU8WOUU1nA03UGogru5C1HlykTk= +gitlab.dms3.io/dms3/go-detect-race v0.0.3/go.mod h1:WWTYJY6zfweMdzFsbxQ2Vxzn6mvvnGRkUT+l7duOI8Q= +gitlab.dms3.io/dms3/go-dms3-blockstore v0.0.2 h1:QSMJaz2sLPgOMVxM0G9vZpn78mj/Zfoj2pqt7ho/nzs= +gitlab.dms3.io/dms3/go-dms3-blockstore v0.0.2/go.mod h1:Ycu8BsrWzBVoWJ39av9eam8ycOsK2GWNmgURSVpNyU8= +gitlab.dms3.io/dms3/go-dms3-blocksutil v0.0.2 h1:hFxMxQBndzHIu32a7k/5QVmw54NQyco07elcT+ur9y8= +gitlab.dms3.io/dms3/go-dms3-blocksutil v0.0.2/go.mod h1:VCMaMsjWRQ+zzxhyjgd9IO9owUQVYz9G81jEzJNFk2E= +gitlab.dms3.io/dms3/go-dms3-delay v0.0.2 h1:+kwb2jAGjNAqzxpGl2EJYhD2Gadui5XoxHkR8AGo0aQ= +gitlab.dms3.io/dms3/go-dms3-delay v0.0.2/go.mod h1:WPBu+WALc2GAYE3KfI5QM+r3dX9gv7wHJVfTFlYBwDg= +gitlab.dms3.io/dms3/go-dms3-ds-help v0.0.3 h1:uSvksVjFcrItUWet55Lokpe5hez+zp4UE6aXeAZp73A= +gitlab.dms3.io/dms3/go-dms3-ds-help v0.0.3/go.mod h1:mobOVtN8rkzaq3ZY/JSnaZxSL0A2N6gQlyGuAk9e6ug= +gitlab.dms3.io/dms3/go-dms3-exchange-interface v0.0.2 h1:ZFXSma5QApRyzjhZf83r5vEHjHXzL6NLuZun42rTxeQ= +gitlab.dms3.io/dms3/go-dms3-exchange-interface v0.0.2/go.mod h1:0Gqk+RrSWyhBlDdjLgRW2M9VegzA1OhoSm2Vf77V/Fg= +gitlab.dms3.io/dms3/go-dms3-pq v0.0.3 
h1:8w+KbYDeMxwLOPQBgcuqYYjcl4Ri/T11qgIdPTFqyYE= +gitlab.dms3.io/dms3/go-dms3-pq v0.0.3/go.mod h1:bHQPQTHCFzc/kBEKestXZs55RbfQewk1ZKKeNLkvNyA= +gitlab.dms3.io/dms3/go-dms3-routing v0.0.2 h1:VYmBd5dghf9t0IWt2sEUW2PNkPTaQesJ/tEM6hZ6ovI= +gitlab.dms3.io/dms3/go-dms3-routing v0.0.2/go.mod h1:c65whh6CW/rzGCgxUyfnKCZ3F9iZe2eg5GynshF6GQQ= +gitlab.dms3.io/dms3/go-dms3-util v0.0.2 h1:xXHeLaht5szd3QPdQp5KcfmXbIyRvRPSdQnMkJPRgPo= +gitlab.dms3.io/dms3/go-dms3-util v0.0.2/go.mod h1:5hPwxzo5zK4NeHE/anWIQGHcIoG7aTl9/Pp0j2zg0l8= +gitlab.dms3.io/dms3/go-ds-badger v0.0.2/go.mod h1:lPg8sF7fBTjUvh6edXwBwL7r7bacG3oV0rHiNNfd7r4= +gitlab.dms3.io/dms3/go-ds-leveldb v0.0.2/go.mod h1:SWw3zbVmzPzYm/5AGDINZBa3pR8YEZoxdpqvvPkUO/U= +gitlab.dms3.io/dms3/go-log v0.0.2 h1:vuNG5qmx6P9iK+A1+k4AmC6Q+ORm2ekWaQVbMmeL2wc= +gitlab.dms3.io/dms3/go-log v0.0.2/go.mod h1:iLuW2zqJJaP1WdbME4P6AS9AIMovwGDAn3zV/riR5iE= +gitlab.dms3.io/dms3/go-log/v2 v2.0.1/go.mod h1:QNTmVEuz5pe4j73ve23UVWoiYW+ZBU8+bea5kX49W0k= +gitlab.dms3.io/dms3/go-metrics-interface v0.0.2 h1:ne/5rq39etdUVBeSl80e/BM6U8dLSzB/nMvSoZ6woXM= +gitlab.dms3.io/dms3/go-metrics-interface v0.0.2/go.mod h1:7A1hwAuYHn/sPzRzK2bF/yJwwyH85pqC9q09ezOCzgE= +gitlab.dms3.io/dms3/go-peertaskqueue v0.0.1 h1:ErRIV4muhFkKiWceoYDY/MWETuH4BLQKIdpPrrgE70s= +gitlab.dms3.io/dms3/go-peertaskqueue v0.0.1/go.mod h1:I/D/X9AbcQILDJswLa0Ye3YG5k6NlE2VteIi2KrtNPA= +gitlab.dms3.io/dms3/public/go-cid v0.0.1/go.mod h1:GQw3gc4CSrFY+aX6M+OBQDlg0p5/eQJoRrayaZzkAOQ= +gitlab.dms3.io/mf/go-multiaddr v0.0.2 h1:MZSIKYi5qBk5iurELtESiTsczdXauQhaqKOnk9vMJfI= +gitlab.dms3.io/mf/go-multiaddr v0.0.2/go.mod h1:BMNP0l/IA8/dbE6vNBf/ben61YNaPU0kn6wXgqZs9vU= +gitlab.dms3.io/mf/go-multiaddr-dns v0.0.1 h1:MVnASu0+LLtlaWANtLikvW22IggFu1MQCnWAxB0bLlI= +gitlab.dms3.io/mf/go-multiaddr-dns v0.0.1/go.mod h1:WDJELif8lHU9QPatIqVn2NBxg3F/a7ZOrX1rRZIewDk= +gitlab.dms3.io/mf/go-multiaddr-fmt v0.0.1 h1:rgX0NJwnjLUXA426H8la0BLxQLhCRcyzaZu5GX4Wsbc= +gitlab.dms3.io/mf/go-multiaddr-fmt v0.0.1/go.mod h1:IJHPnGlg0LS5nSWJW0tFX2VMDh9jjhetou42HElWdP4= +gitlab.dms3.io/p2p/go-buffer-pool v0.0.1 h1:2IUBfHOWOarILKvoF1NHnfSUD5MMI/g6f64ZTUwacoA= +gitlab.dms3.io/p2p/go-buffer-pool v0.0.1/go.mod h1:M+a5uWIUEkiKihMm5+IL0+pqzG6G7mcD6nNWosKUKO0= +gitlab.dms3.io/p2p/go-conn-security-multistream v0.0.1/go.mod h1:SCdGUSFmCt+twk7CbulZ9DGdX7ab9PTJLtAkDPyDU2I= +gitlab.dms3.io/p2p/go-conn-security-multistream v0.0.2 h1:sYH5gW8N/Xyni1lA5h5tVqnJLOmevDy5csinG4tpcMQ= +gitlab.dms3.io/p2p/go-conn-security-multistream v0.0.2/go.mod h1:xZy3z0EGD3IPU1SjzKA4XJZzNPG5/gL73JK3b7WuQuQ= +gitlab.dms3.io/p2p/go-eventbus v0.0.2 h1:LWfbTqq7c11/mnQspbIEndMjH2ZsC1e/WoPtQwyvN1U= +gitlab.dms3.io/p2p/go-eventbus v0.0.2/go.mod h1:5VG85wIN7DBS2Ih9ul4GDS+W7WBUHUKJsmcPE1jI9D4= +gitlab.dms3.io/p2p/go-flow-metrics v0.0.1/go.mod h1:0QFTYtRYP6Y1xfK0xycr40drfoM+0WVCoOXPyI0eFN0= +gitlab.dms3.io/p2p/go-flow-metrics v0.0.2 h1:btZSRSP181T6Ii3a5iJ6qeHdiPgPUdvowNH30Udl4J8= +gitlab.dms3.io/p2p/go-flow-metrics v0.0.2/go.mod h1:0QFTYtRYP6Y1xfK0xycr40drfoM+0WVCoOXPyI0eFN0= +gitlab.dms3.io/p2p/go-mplex v0.0.2 h1:H6IJCR9SIjM5W5uXe2HYAectN1HTJu/+eWkj0SkbZRY= +gitlab.dms3.io/p2p/go-mplex v0.0.2/go.mod h1:xybsl2Yr4EXk0OKjB8xt7Fn6GhHQfUYe63tbTmxKvNM= +gitlab.dms3.io/p2p/go-msgio v0.0.1 h1:Yh+40UhoamgwL8J3PD8wezS1ottJGHHwVdp/d1vIWZA= +gitlab.dms3.io/p2p/go-msgio v0.0.1/go.mod h1:H6AqMy1HOxZb+yEXj2d3o4wgWb+lJx9aYI+nWka46T0= +gitlab.dms3.io/p2p/go-nat v0.0.1 h1:dMthxnLjCPg76Wksn35prrJX+tAUDxbo1V379Ukg+Vo= +gitlab.dms3.io/p2p/go-nat v0.0.1/go.mod h1:gb6iaRnT0zacniY+4T5TINZqwnqzOE+TaUM/B3PIx2Q= 
+gitlab.dms3.io/p2p/go-netroute v0.0.1 h1:SJH1/YgST5q8823T0q/vj6OMaZ71dw+acgdJVQPbFtc= +gitlab.dms3.io/p2p/go-netroute v0.0.1/go.mod h1:Ppim8kqanv6FEn5EL8G1mFRvdKG6i1ubfyK/vqfCMd8= +gitlab.dms3.io/p2p/go-openssl v0.0.1 h1:BC9zzxiRliZmWNpxQQX37DDkmKpR2rXNUkwxRT0f6NI= +gitlab.dms3.io/p2p/go-openssl v0.0.1/go.mod h1:aQ3IGGzTd5vw48HiiXv6z6sFKiB52w2jZdg2TFNjylA= +gitlab.dms3.io/p2p/go-p2p v0.0.4 h1:BC0Wc+by6CpJrPElBBK623SxraI+wkPh/VBiZMbivA8= +gitlab.dms3.io/p2p/go-p2p v0.0.4/go.mod h1:CBN6+mQUNcSdrTSDemATpsTtVfqaYJlTafGlHQvr8Ek= +gitlab.dms3.io/p2p/go-p2p-addrutil v0.0.3 h1:gTmbuNDjuvXzYFoUCx8ZshTTldTg9CV57uvc9daS4hA= +gitlab.dms3.io/p2p/go-p2p-addrutil v0.0.3/go.mod h1:fA5JiqzWbwjYhtzUOa5Ul/ofZnXbtjNaGWyvQOjAHHg= +gitlab.dms3.io/p2p/go-p2p-autonat v0.0.2 h1:DSWeeq0NF9zkZqjGJOChT4zg1d7A9iIgPn9u180c+Gc= +gitlab.dms3.io/p2p/go-p2p-autonat v0.0.2/go.mod h1:ouO1a3o7PYfUlFJ/RjZ+GlDrTn4iOXDVawrk6Sinnz8= +gitlab.dms3.io/p2p/go-p2p-blankhost v0.0.2 h1:mrcXcG3yRvF2nng81tsyhSed1mMw5wi9UOxUtr2vvjY= +gitlab.dms3.io/p2p/go-p2p-blankhost v0.0.2/go.mod h1:bPiaYF/z88lfAIE9D8HX4ny/sxY2PNDWbjccyS5vtzI= +gitlab.dms3.io/p2p/go-p2p-circuit v0.0.2 h1:j9vbB0cOSTyVm0icLeT1vjcZPB+3yNTa3UFOU6WZE/4= +gitlab.dms3.io/p2p/go-p2p-circuit v0.0.2/go.mod h1:fWBL5xh9TtE4WToFOZ4k4onuEAS75QIOhJRoXjkr7OU= +gitlab.dms3.io/p2p/go-p2p-core v0.0.1/go.mod h1:xm5X+ChR6cZDKYaIDSmuCs6Uvd1GtbDy47xXjCljBAk= +gitlab.dms3.io/p2p/go-p2p-core v0.0.2 h1:mMe0MmJaiG4zzDSc2u6BwXeZ4o5P5EuRM4maXL0AP5Q= +gitlab.dms3.io/p2p/go-p2p-core v0.0.2/go.mod h1:V4Oqm6ZHKOwCcf0tkBJBomlztwnytpABtOsQIeD676E= +gitlab.dms3.io/p2p/go-p2p-discovery v0.0.2 h1:8i2m2dtHQYagf/VH4m/ioI5nRC2owXdWHx04h8mMKHI= +gitlab.dms3.io/p2p/go-p2p-discovery v0.0.2/go.mod h1:Vx7g6Ts3/WqFXnHfWHdiwUqaVn2reifwzQFsIkS/WzY= +gitlab.dms3.io/p2p/go-p2p-loggables v0.0.2 h1:JrX33OIQXjO91oF43tLoVcNuYjr51/G8RqoxBWcjv5A= +gitlab.dms3.io/p2p/go-p2p-loggables v0.0.2/go.mod h1:JvsQ/IfHnLxdxitAUhOVhdgK5swICx68Cjzh0xNih8Q= +gitlab.dms3.io/p2p/go-p2p-mplex v0.0.2 h1:bgDSuMuzesrMqLfromBUlSxX8W4Kmnw62Y0ceRFzpSQ= +gitlab.dms3.io/p2p/go-p2p-mplex v0.0.2/go.mod h1:bohexmBfoDjkqd2zx5s+uYsb066m3gPcppRGq4c8SIw= +gitlab.dms3.io/p2p/go-p2p-nat v0.0.2 h1:u+8CVPRiTGepY0Y3CVYk+IpX7n90w1v3jSYztXsiPD8= +gitlab.dms3.io/p2p/go-p2p-nat v0.0.2/go.mod h1:kQmgj7CHacl8Sx2ClF78HiwOR4DQoSXppG5ZhkDH5YQ= +gitlab.dms3.io/p2p/go-p2p-netutil v0.0.2 h1:nL77tfxNHIFjPu4rweG10QCsFdUuWqF853q9Be0zZIE= +gitlab.dms3.io/p2p/go-p2p-netutil v0.0.2/go.mod h1:xHoO71Y9ZtCPs1An55Gqo1X6xRPTtsp6DoqpdrOQ0NI= +gitlab.dms3.io/p2p/go-p2p-noise v0.0.2 h1:XowD4uw9kmmvuN6arXtZacJE+VlNqySjVkskIIa8GuU= +gitlab.dms3.io/p2p/go-p2p-noise v0.0.2/go.mod h1:InXtmMT0jZyX338Sw+e/9MCbjBHUEnV8MzwpuLPJ0t8= +gitlab.dms3.io/p2p/go-p2p-peerstore v0.0.2 h1:unnuqUTlAKZC1aka5MRGM+vzSu2Ky84+dotSo6/VyiQ= +gitlab.dms3.io/p2p/go-p2p-peerstore v0.0.2/go.mod h1:axBxiEgIlhQwLjBJh/6tE5mCSJK8+BL/wDAIExRC+ko= +gitlab.dms3.io/p2p/go-p2p-pnet v0.0.2 h1:8DK7on8L18v+zkxhP9cuO+99Xz07s3quY7s1Ni/OtJk= +gitlab.dms3.io/p2p/go-p2p-pnet v0.0.2/go.mod h1:VRKLLhFhnX6+1CZVNJBGm05mcnm5VJSHgn9ISmG6pME= +gitlab.dms3.io/p2p/go-p2p-quic-transport v0.0.2 h1:RFbo2tftK9rqpLUPdjMZz7HxqBM32/r1SO2potjwwSI= +gitlab.dms3.io/p2p/go-p2p-quic-transport v0.0.2/go.mod h1:G4tkjZ4lKjNSnPYS1PbYqIml3ZQ48KyIDhz4U91v904= +gitlab.dms3.io/p2p/go-p2p-record v0.0.2 h1:KnIBvn/lzQrmCfeU4n0YLa5TAxaVbexrC+UjGWkKr5g= +gitlab.dms3.io/p2p/go-p2p-record v0.0.2/go.mod h1:O9mpqp2teeU/e5WTcHiLldzXXOtAADLG1S/0AYbj4pw= +gitlab.dms3.io/p2p/go-p2p-swarm v0.0.2 h1:4LUtuFdlTD3me/m91KRQtaVHq/a7S7g5f23D2iXCXPs= +gitlab.dms3.io/p2p/go-p2p-swarm 
v0.0.2/go.mod h1:Ec7L5HlLhYPQirQZh7YmghON2QIz35IQTljBPZbwsdI= +gitlab.dms3.io/p2p/go-p2p-testing v0.0.1/go.mod h1:Wy5MSYvMlDtf8GZ9rOdZm0Sxv57XCh1XqNI5z9E6JrU= +gitlab.dms3.io/p2p/go-p2p-testing v0.0.2 h1:mAAzsnTbNDDXToKxAWs1dSJTFcG8jO4vwmgg/JRYdCE= +gitlab.dms3.io/p2p/go-p2p-testing v0.0.2/go.mod h1:2PatJW3XwpOT2bxlR3Q9IqBuwZaMN483guaseyQkdZk= +gitlab.dms3.io/p2p/go-p2p-tls v0.0.2 h1:Yi1r9AqDOigPmaY4oBrZvbMxgBiwvhJyHYeM5g5vp8I= +gitlab.dms3.io/p2p/go-p2p-tls v0.0.2/go.mod h1:vi2UEOaT1Q+g987bkBiaSnamdSyzZgHn88ZsAhZgN2o= +gitlab.dms3.io/p2p/go-p2p-transport-upgrader v0.0.2 h1:YgWZHt4fkBdAVdH6oU86jM6hgxsbeZYX/hm7xjPMOlA= +gitlab.dms3.io/p2p/go-p2p-transport-upgrader v0.0.2/go.mod h1:+iia44MgzuwSZCssqZ2CvVW8l/nVRYDNUVMB66TPz8Q= +gitlab.dms3.io/p2p/go-p2p-yamux v0.0.3 h1:YGHb5AcxmPFIzZUYsKkvi1ygJGJFIcub3j6aAg4eLbk= +gitlab.dms3.io/p2p/go-p2p-yamux v0.0.3/go.mod h1:3w5QQCPB3Yn8QSocKe62TNhUyNTMoUhyvg0HJsny+Iw= +gitlab.dms3.io/p2p/go-reuseport v0.0.1 h1:2UyE9NOwOMgSx8Shitp9FM+QgUSC7y4SAvlnKpho0Xc= +gitlab.dms3.io/p2p/go-reuseport v0.0.1/go.mod h1:T+dBjUvmAjEDJXLvO3lynKdo0cHmZ2uTRKGDnT6huZY= +gitlab.dms3.io/p2p/go-reuseport-transport v0.0.2 h1:62Eu6+wb8c7Wfiw+qS+e3f298ornMz1XT9DISaU5EIk= +gitlab.dms3.io/p2p/go-reuseport-transport v0.0.2/go.mod h1:KTQdhF/NscZtgpHvWYtMZrE4XD7HB6y9fPMuQZX+kHc= +gitlab.dms3.io/p2p/go-sockaddr v0.0.1 h1:VupmB55WkCvJxOveddXzkKadW/jtb9JEeA0Q572AWL4= +gitlab.dms3.io/p2p/go-sockaddr v0.0.1/go.mod h1:g7iIq8MDEq2jtHLw5AlFNjEqAuer7C11HT0A77TlMik= +gitlab.dms3.io/p2p/go-stream-muxer-multistream v0.0.2 h1:I6jIBJsNMhJKBdD44InRMHGCfthVgrGZrqgr4hzVgyY= +gitlab.dms3.io/p2p/go-stream-muxer-multistream v0.0.2/go.mod h1:a9scy+6nGbVOVNXkJrP53UEPWgwW2GUw5v6xyQ/JOtk= +gitlab.dms3.io/p2p/go-tcp-transport v0.0.2 h1:Y8pLb7Pde2+WoKxobwymzyT0OxAD/LGeNSCr5/p0GWk= +gitlab.dms3.io/p2p/go-tcp-transport v0.0.2/go.mod h1:enLNmOWAlnh6Adv9H8gU8A5oa/xFxvDdjgo/JDsfxYY= +gitlab.dms3.io/p2p/go-ws-transport v0.0.2 h1:3lhSs1mXn+DlmYncAifY+z/w13NDNgPIXSiJD7KGvZo= +gitlab.dms3.io/p2p/go-ws-transport v0.0.2/go.mod h1:khP6cF12STV7r52EjaB95OGlaOFWHaxiXcw/lJgIwOM= +gitlab.dms3.io/p2p/go-yamux/v2 v2.0.0 h1:KyrxDkqwBeNLlLteQGLJtPbKuYOMM5mJ+1UwI+IFl7A= +gitlab.dms3.io/p2p/go-yamux/v2 v2.0.0/go.mod h1:+RHkU0NF5DxnUv6cOYeuWNcLlcBRP+Eo3zM2OTlk64Y= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo= +go.uber.org/goleak v1.0.0/go.mod 
h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf h1:B2n+Zi5QeYRDAEodEu72OS36gmTWjgpXr2+cWcBW90o= +golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6 h1:0PC75Fz/kyMGhL0e1QnypqK2kQMqKt9csD1GnMJR+Zk= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210608053332-aa57babbf139 h1:C+AwYEtBp/VQwoLntUmQ/yx3MS9vmZaKNdw5eOpoQe8= +golang.org/x/sys v0.0.0-20210608053332-aa57babbf139/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod 
h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/internal/blockpresencemanager/blockpresencemanager.go b/internal/blockpresencemanager/blockpresencemanager.go new file mode 100644 index 0000000000000000000000000000000000000000..eeff555d0ea286cf6cfb84819a64930724afbb38 --- /dev/null +++ b/internal/blockpresencemanager/blockpresencemanager.go @@ -0,0 +1,121 @@ +package blockpresencemanager + +import ( + "sync" + + cid "gitlab.dms3.io/dms3/go-cid" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +// BlockPresenceManager keeps track of which peers have indicated that they +// have or explicitly don't have a block +type BlockPresenceManager struct { + sync.RWMutex + presence map[cid.Cid]map[peer.ID]bool +} + +func New() *BlockPresenceManager { + return &BlockPresenceManager{ + presence: make(map[cid.Cid]map[peer.ID]bool), + } +} + +// ReceiveFrom is called when a peer sends us information about which blocks +// it has and does not have +func (bpm *BlockPresenceManager) ReceiveFrom(p peer.ID, haves []cid.Cid, dontHaves []cid.Cid) { + bpm.Lock() + defer bpm.Unlock() + + for _, c := range haves { + bpm.updateBlockPresence(p, c, true) + } + for _, c := range dontHaves { + bpm.updateBlockPresence(p, c, false) + } +} + +func (bpm *BlockPresenceManager) updateBlockPresence(p peer.ID, c cid.Cid, present bool) { + _, ok := bpm.presence[c] + if !ok { + bpm.presence[c] = make(map[peer.ID]bool) + } + + // Make sure not to change HAVE to DONT_HAVE + has, pok := bpm.presence[c][p] + if pok && has { + return + } + bpm.presence[c][p] = present +} + +// PeerHasBlock indicates whether the given peer has sent a HAVE for the given +// cid +func (bpm *BlockPresenceManager) PeerHasBlock(p peer.ID, c cid.Cid) bool { + bpm.RLock() + defer bpm.RUnlock() + + return bpm.presence[c][p] +} + +// PeerDoesNotHaveBlock indicates whether the given peer has sent a DONT_HAVE +// for the given cid +func (bpm *BlockPresenceManager) PeerDoesNotHaveBlock(p peer.ID, c cid.Cid) bool { + bpm.RLock() + defer bpm.RUnlock() + + have, known := bpm.presence[c][p] + return known && !have +} + +// Filters the keys such that all the given peers have received a DONT_HAVE +// for a key. 
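// For example (an illustrative sketch, not part of this change, reusing the
// hypothetical helper calls from internal/testutil as the tests below do):
//
//	bpm := New()
//	peers := testutil.GeneratePeers(2)
//	keys := testutil.GenerateCids(2)
//	// Both peers report DONT_HAVE for keys[0]; only peers[0] does for keys[1].
//	bpm.ReceiveFrom(peers[0], nil, keys)
//	bpm.ReceiveFrom(peers[1], nil, keys[:1])
//	exhausted := bpm.AllPeersDoNotHaveBlock(peers, keys)
//	// exhausted now contains only keys[0].
//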
+// This allows us to know if we've exhausted all possibilities of finding +// the key with the peers we know about. +func (bpm *BlockPresenceManager) AllPeersDoNotHaveBlock(peers []peer.ID, ks []cid.Cid) []cid.Cid { + bpm.RLock() + defer bpm.RUnlock() + + var res []cid.Cid + for _, c := range ks { + if bpm.allDontHave(peers, c) { + res = append(res, c) + } + } + return res +} + +func (bpm *BlockPresenceManager) allDontHave(peers []peer.ID, c cid.Cid) bool { + // Check if we know anything about the cid's block presence + ps, cok := bpm.presence[c] + if !cok { + return false + } + + // Check if we explicitly know that all the given peers do not have the cid + for _, p := range peers { + if has, pok := ps[p]; !pok || has { + return false + } + } + return true +} + +// RemoveKeys cleans up the given keys from the block presence map +func (bpm *BlockPresenceManager) RemoveKeys(ks []cid.Cid) { + bpm.Lock() + defer bpm.Unlock() + + for _, c := range ks { + delete(bpm.presence, c) + } +} + +// HasKey indicates whether the BlockPresenceManager is tracking the given key +// (used by the tests) +func (bpm *BlockPresenceManager) HasKey(c cid.Cid) bool { + bpm.Lock() + defer bpm.Unlock() + + _, ok := bpm.presence[c] + return ok +} diff --git a/internal/blockpresencemanager/blockpresencemanager_test.go b/internal/blockpresencemanager/blockpresencemanager_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1550d33f85667d933aef0daadd77b33f2be029bc --- /dev/null +++ b/internal/blockpresencemanager/blockpresencemanager_test.go @@ -0,0 +1,239 @@ +package blockpresencemanager + +import ( + "fmt" + "testing" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" + + cid "gitlab.dms3.io/dms3/go-cid" +) + +const ( + expHasFalseMsg = "Expected PeerHasBlock to return false" + expHasTrueMsg = "Expected PeerHasBlock to return true" + expDoesNotHaveFalseMsg = "Expected PeerDoesNotHaveBlock to return false" + expDoesNotHaveTrueMsg = "Expected PeerDoesNotHaveBlock to return true" +) + +func TestBlockPresenceManager(t *testing.T) { + bpm := New() + + p := testutil.GeneratePeers(1)[0] + cids := testutil.GenerateCids(2) + c0 := cids[0] + c1 := cids[1] + + // Nothing stored yet, both PeerHasBlock and PeerDoesNotHaveBlock should + // return false + if bpm.PeerHasBlock(p, c0) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c0) { + t.Fatal(expDoesNotHaveFalseMsg) + } + + // HAVE cid0 / DONT_HAVE cid1 + bpm.ReceiveFrom(p, []cid.Cid{c0}, []cid.Cid{c1}) + + // Peer has received HAVE for cid0 + if !bpm.PeerHasBlock(p, c0) { + t.Fatal(expHasTrueMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c0) { + t.Fatal(expDoesNotHaveFalseMsg) + } + + // Peer has received DONT_HAVE for cid1 + if !bpm.PeerDoesNotHaveBlock(p, c1) { + t.Fatal(expDoesNotHaveTrueMsg) + } + if bpm.PeerHasBlock(p, c1) { + t.Fatal(expHasFalseMsg) + } + + // HAVE cid1 / DONT_HAVE cid0 + bpm.ReceiveFrom(p, []cid.Cid{c1}, []cid.Cid{c0}) + + // DONT_HAVE cid0 should NOT over-write earlier HAVE cid0 + if bpm.PeerDoesNotHaveBlock(p, c0) { + t.Fatal(expDoesNotHaveFalseMsg) + } + if !bpm.PeerHasBlock(p, c0) { + t.Fatal(expHasTrueMsg) + } + + // HAVE cid1 should over-write earlier DONT_HAVE cid1 + if !bpm.PeerHasBlock(p, c1) { + t.Fatal(expHasTrueMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c1) { + t.Fatal(expDoesNotHaveFalseMsg) + } + + // Remove cid0 + bpm.RemoveKeys([]cid.Cid{c0}) + + // Nothing stored, both PeerHasBlock and PeerDoesNotHaveBlock should + // return false + if bpm.PeerHasBlock(p, 
c0) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c0) { + t.Fatal(expDoesNotHaveFalseMsg) + } + + // Remove cid1 + bpm.RemoveKeys([]cid.Cid{c1}) + + // Nothing stored, both PeerHasBlock and PeerDoesNotHaveBlock should + // return false + if bpm.PeerHasBlock(p, c1) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p, c1) { + t.Fatal(expDoesNotHaveFalseMsg) + } +} + +func TestAddRemoveMulti(t *testing.T) { + bpm := New() + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + cids := testutil.GenerateCids(3) + c0 := cids[0] + c1 := cids[1] + c2 := cids[2] + + // p0: HAVE cid0, cid1 / DONT_HAVE cid1, cid2 + // p1: HAVE cid1, cid2 / DONT_HAVE cid0 + bpm.ReceiveFrom(p0, []cid.Cid{c0, c1}, []cid.Cid{c1, c2}) + bpm.ReceiveFrom(p1, []cid.Cid{c1, c2}, []cid.Cid{c0}) + + // Peer 0 should end up with + // - HAVE cid0 + // - HAVE cid1 + // - DONT_HAVE cid2 + if !bpm.PeerHasBlock(p0, c0) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerHasBlock(p0, c1) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerDoesNotHaveBlock(p0, c2) { + t.Fatal(expDoesNotHaveTrueMsg) + } + + // Peer 1 should end up with + // - HAVE cid1 + // - HAVE cid2 + // - DONT_HAVE cid0 + if !bpm.PeerHasBlock(p1, c1) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerHasBlock(p1, c2) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerDoesNotHaveBlock(p1, c0) { + t.Fatal(expDoesNotHaveTrueMsg) + } + + // Remove cid1 and cid2. Should end up with + // Peer 0: HAVE cid0 + // Peer 1: DONT_HAVE cid0 + bpm.RemoveKeys([]cid.Cid{c1, c2}) + if !bpm.PeerHasBlock(p0, c0) { + t.Fatal(expHasTrueMsg) + } + if !bpm.PeerDoesNotHaveBlock(p1, c0) { + t.Fatal(expDoesNotHaveTrueMsg) + } + + // The other keys should have been cleared, so both HasBlock() and + // DoesNotHaveBlock() should return false + if bpm.PeerHasBlock(p0, c1) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p0, c1) { + t.Fatal(expDoesNotHaveFalseMsg) + } + if bpm.PeerHasBlock(p0, c2) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p0, c2) { + t.Fatal(expDoesNotHaveFalseMsg) + } + if bpm.PeerHasBlock(p1, c1) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p1, c1) { + t.Fatal(expDoesNotHaveFalseMsg) + } + if bpm.PeerHasBlock(p1, c2) { + t.Fatal(expHasFalseMsg) + } + if bpm.PeerDoesNotHaveBlock(p1, c2) { + t.Fatal(expDoesNotHaveFalseMsg) + } +} + +func TestAllPeersDoNotHaveBlock(t *testing.T) { + bpm := New() + + peers := testutil.GeneratePeers(3) + p0 := peers[0] + p1 := peers[1] + p2 := peers[2] + + cids := testutil.GenerateCids(3) + c0 := cids[0] + c1 := cids[1] + c2 := cids[2] + + // c0 c1 c2 + // p0 ? N N + // p1 N Y ? 
+ // p2 Y Y N + bpm.ReceiveFrom(p0, []cid.Cid{}, []cid.Cid{c1, c2}) + bpm.ReceiveFrom(p1, []cid.Cid{c1}, []cid.Cid{c0}) + bpm.ReceiveFrom(p2, []cid.Cid{c0, c1}, []cid.Cid{c2}) + + type testcase struct { + peers []peer.ID + ks []cid.Cid + exp []cid.Cid + } + + testcases := []testcase{ + testcase{[]peer.ID{p0}, []cid.Cid{c0}, []cid.Cid{}}, + testcase{[]peer.ID{p1}, []cid.Cid{c0}, []cid.Cid{c0}}, + testcase{[]peer.ID{p2}, []cid.Cid{c0}, []cid.Cid{}}, + + testcase{[]peer.ID{p0}, []cid.Cid{c1}, []cid.Cid{c1}}, + testcase{[]peer.ID{p1}, []cid.Cid{c1}, []cid.Cid{}}, + testcase{[]peer.ID{p2}, []cid.Cid{c1}, []cid.Cid{}}, + + testcase{[]peer.ID{p0}, []cid.Cid{c2}, []cid.Cid{c2}}, + testcase{[]peer.ID{p1}, []cid.Cid{c2}, []cid.Cid{}}, + testcase{[]peer.ID{p2}, []cid.Cid{c2}, []cid.Cid{c2}}, + + // p0 recieved DONT_HAVE for c1 & c2 (but not for c0) + testcase{[]peer.ID{p0}, []cid.Cid{c0, c1, c2}, []cid.Cid{c1, c2}}, + testcase{[]peer.ID{p0, p1}, []cid.Cid{c0, c1, c2}, []cid.Cid{}}, + // Both p0 and p2 received DONT_HAVE for c2 + testcase{[]peer.ID{p0, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{c2}}, + testcase{[]peer.ID{p0, p1, p2}, []cid.Cid{c0, c1, c2}, []cid.Cid{}}, + } + + for i, tc := range testcases { + if !testutil.MatchKeysIgnoreOrder( + bpm.AllPeersDoNotHaveBlock(tc.peers, tc.ks), + tc.exp, + ) { + t.Fatal(fmt.Sprintf("test case %d failed: expected matching keys", i)) + } + } +} diff --git a/internal/decision/blockstoremanager.go b/internal/decision/blockstoremanager.go new file mode 100644 index 0000000000000000000000000000000000000000..fb29303c1ed9eba9c0d14137b5b898bdce7fd331 --- /dev/null +++ b/internal/decision/blockstoremanager.go @@ -0,0 +1,126 @@ +package decision + +import ( + "context" + "fmt" + "sync" + + process "github.com/jbenet/goprocess" + blocks "gitlab.dms3.io/dms3/go-block-format" + cid "gitlab.dms3.io/dms3/go-cid" + bstore "gitlab.dms3.io/dms3/go-dms3-blockstore" +) + +// blockstoreManager maintains a pool of workers that make requests to the blockstore. +type blockstoreManager struct { + bs bstore.Blockstore + workerCount int + jobs chan func() + px process.Process +} + +// newBlockstoreManager creates a new blockstoreManager with the given context +// and number of workers +func newBlockstoreManager(bs bstore.Blockstore, workerCount int) *blockstoreManager { + return &blockstoreManager{ + bs: bs, + workerCount: workerCount, + jobs: make(chan func()), + px: process.WithTeardown(func() error { return nil }), + } +} + +func (bsm *blockstoreManager) start(px process.Process) { + px.AddChild(bsm.px) + // Start up workers + for i := 0; i < bsm.workerCount; i++ { + bsm.px.Go(func(px process.Process) { + bsm.worker(px) + }) + } +} + +func (bsm *blockstoreManager) worker(px process.Process) { + for { + select { + case <-px.Closing(): + return + case job := <-bsm.jobs: + job() + } + } +} + +func (bsm *blockstoreManager) addJob(ctx context.Context, job func()) error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-bsm.px.Closing(): + return fmt.Errorf("shutting down") + case bsm.jobs <- job: + return nil + } +} + +func (bsm *blockstoreManager) getBlockSizes(ctx context.Context, ks []cid.Cid) (map[cid.Cid]int, error) { + res := make(map[cid.Cid]int) + if len(ks) == 0 { + return res, nil + } + + var lk sync.Mutex + return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) { + size, err := bsm.bs.GetSize(c) + if err != nil { + if err != bstore.ErrNotFound { + // Note: this isn't a fatal error. 
We shouldn't abort the request + log.Errorf("blockstore.GetSize(%s) error: %s", c, err) + } + } else { + lk.Lock() + res[c] = size + lk.Unlock() + } + }) +} + +func (bsm *blockstoreManager) getBlocks(ctx context.Context, ks []cid.Cid) (map[cid.Cid]blocks.Block, error) { + res := make(map[cid.Cid]blocks.Block) + if len(ks) == 0 { + return res, nil + } + + var lk sync.Mutex + return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) { + blk, err := bsm.bs.Get(c) + if err != nil { + if err != bstore.ErrNotFound { + // Note: this isn't a fatal error. We shouldn't abort the request + log.Errorf("blockstore.Get(%s) error: %s", c, err) + } + } else { + lk.Lock() + res[c] = blk + lk.Unlock() + } + }) +} + +func (bsm *blockstoreManager) jobPerKey(ctx context.Context, ks []cid.Cid, jobFn func(c cid.Cid)) error { + var err error + wg := sync.WaitGroup{} + for _, k := range ks { + c := k + wg.Add(1) + err = bsm.addJob(ctx, func() { + jobFn(c) + wg.Done() + }) + if err != nil { + wg.Done() + break + } + } + wg.Wait() + return err +} diff --git a/internal/decision/blockstoremanager_test.go b/internal/decision/blockstoremanager_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2ba7710bb2a8e117769fa22bde216391c92ee4e7 --- /dev/null +++ b/internal/decision/blockstoremanager_test.go @@ -0,0 +1,258 @@ +package decision + +import ( + "context" + "crypto/rand" + "sync" + "testing" + "time" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + cid "gitlab.dms3.io/dms3/go-cid" + + process "github.com/jbenet/goprocess" + blocks "gitlab.dms3.io/dms3/go-block-format" + ds "gitlab.dms3.io/dms3/go-datastore" + "gitlab.dms3.io/dms3/go-datastore/delayed" + ds_sync "gitlab.dms3.io/dms3/go-datastore/sync" + blockstore "gitlab.dms3.io/dms3/go-dms3-blockstore" + delay "gitlab.dms3.io/dms3/go-dms3-delay" +) + +func TestBlockstoreManagerNotFoundKey(t *testing.T) { + ctx := context.Background() + bsdelay := delay.Fixed(3 * time.Millisecond) + dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) + + bsm := newBlockstoreManager(bstore, 5) + bsm.start(process.WithTeardown(func() error { return nil })) + + cids := testutil.GenerateCids(4) + sizes, err := bsm.getBlockSizes(ctx, cids) + if err != nil { + t.Fatal(err) + } + if len(sizes) != 0 { + t.Fatal("Wrong response length") + } + + for _, c := range cids { + if _, ok := sizes[c]; ok { + t.Fatal("Non-existent block should have no size") + } + } + + blks, err := bsm.getBlocks(ctx, cids) + if err != nil { + t.Fatal(err) + } + if len(blks) != 0 { + t.Fatal("Wrong response length") + } + + for _, c := range cids { + if _, ok := blks[c]; ok { + t.Fatal("Non-existent block should have no size") + } + } +} + +func TestBlockstoreManager(t *testing.T) { + ctx := context.Background() + bsdelay := delay.Fixed(3 * time.Millisecond) + dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) + + bsm := newBlockstoreManager(bstore, 5) + bsm.start(process.WithTeardown(func() error { return nil })) + + exp := make(map[cid.Cid]blocks.Block) + var blks []blocks.Block + for i := 0; i < 32; i++ { + buf := make([]byte, 1024*(i+1)) + _, _ = rand.Read(buf) + b := blocks.NewBlock(buf) + blks = append(blks, b) + exp[b.Cid()] = b + } + + // Put all blocks in the blockstore except the last one + if err := bstore.PutMany(blks[:len(blks)-1]); err != nil { + t.Fatal(err) + } + + var cids []cid.Cid + for _, b := range blks { + 
cids = append(cids, b.Cid()) + } + + sizes, err := bsm.getBlockSizes(ctx, cids) + if err != nil { + t.Fatal(err) + } + if len(sizes) != len(blks)-1 { + t.Fatal("Wrong response length") + } + + for _, c := range cids { + expSize := len(exp[c].RawData()) + size, ok := sizes[c] + + // Only the last key should be missing + if c.Equals(cids[len(cids)-1]) { + if ok { + t.Fatal("Non-existent block should not be in sizes map") + } + } else { + if !ok { + t.Fatal("Block should be in sizes map") + } + if size != expSize { + t.Fatal("Block has wrong size") + } + } + } + + fetched, err := bsm.getBlocks(ctx, cids) + if err != nil { + t.Fatal(err) + } + if len(fetched) != len(blks)-1 { + t.Fatal("Wrong response length") + } + + for _, c := range cids { + blk, ok := fetched[c] + + // Only the last key should be missing + if c.Equals(cids[len(cids)-1]) { + if ok { + t.Fatal("Non-existent block should not be in blocks map") + } + } else { + if !ok { + t.Fatal("Block should be in blocks map") + } + if !blk.Cid().Equals(c) { + t.Fatal("Block has wrong cid") + } + } + } +} + +func TestBlockstoreManagerConcurrency(t *testing.T) { + ctx := context.Background() + bsdelay := delay.Fixed(3 * time.Millisecond) + dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) + + workerCount := 5 + bsm := newBlockstoreManager(bstore, workerCount) + bsm.start(process.WithTeardown(func() error { return nil })) + + blkSize := int64(8 * 1024) + blks := testutil.GenerateBlocksOfSize(32, blkSize) + var ks []cid.Cid + for _, b := range blks { + ks = append(ks, b.Cid()) + } + + err := bstore.PutMany(blks) + if err != nil { + t.Fatal(err) + } + + // Create more concurrent requests than the number of workers + wg := sync.WaitGroup{} + for i := 0; i < 16; i++ { + wg.Add(1) + + go func(t *testing.T) { + defer wg.Done() + + sizes, err := bsm.getBlockSizes(ctx, ks) + if err != nil { + t.Error(err) + } + if len(sizes) != len(blks) { + t.Error("Wrong response length") + } + }(t) + } + wg.Wait() +} + +func TestBlockstoreManagerClose(t *testing.T) { + ctx := context.Background() + delayTime := 20 * time.Millisecond + bsdelay := delay.Fixed(delayTime) + dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) + + bsm := newBlockstoreManager(bstore, 3) + px := process.WithTeardown(func() error { return nil }) + bsm.start(px) + + blks := testutil.GenerateBlocksOfSize(10, 1024) + var ks []cid.Cid + for _, b := range blks { + ks = append(ks, b.Cid()) + } + + err := bstore.PutMany(blks) + if err != nil { + t.Fatal(err) + } + + go px.Close() + + time.Sleep(5 * time.Millisecond) + + before := time.Now() + _, err = bsm.getBlockSizes(ctx, ks) + if err == nil { + t.Error("expected an error") + } + // would expect to wait delayTime*10 if we didn't cancel. 
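	// (Since the delayed datastore is wrapped in a mutex, the ten GetSize calls
	// effectively run one at a time, i.e. roughly 10 * 20ms = 200ms end to end,
	// whereas the check below allows at most 2 * 20ms = 40ms after cancellation.)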
+ if time.Since(before) > delayTime*2 { + t.Error("expected a fast timeout") + } +} + +func TestBlockstoreManagerCtxDone(t *testing.T) { + delayTime := 20 * time.Millisecond + bsdelay := delay.Fixed(delayTime) + + dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) + bstore := blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)) + + bsm := newBlockstoreManager(bstore, 3) + proc := process.WithTeardown(func() error { return nil }) + bsm.start(proc) + + blks := testutil.GenerateBlocksOfSize(10, 1024) + var ks []cid.Cid + for _, b := range blks { + ks = append(ks, b.Cid()) + } + + err := bstore.PutMany(blks) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), delayTime/2) + defer cancel() + + before := time.Now() + _, err = bsm.getBlockSizes(ctx, ks) + if err == nil { + t.Error("expected an error") + } + + // would expect to wait delayTime*10 if we didn't cancel. + if time.Since(before) > delayTime*2 { + t.Error("expected a fast timeout") + } +} diff --git a/internal/decision/engine.go b/internal/decision/engine.go new file mode 100644 index 0000000000000000000000000000000000000000..308b676c3da2fb1cf9e382c2dc348898430da6f8 --- /dev/null +++ b/internal/decision/engine.go @@ -0,0 +1,729 @@ +// Package decision implements the decision engine for the bitswap service. +package decision + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/google/uuid" + + process "github.com/jbenet/goprocess" + bsmsg "gitlab.dms3.io/dms3/go-bitswap/message" + pb "gitlab.dms3.io/dms3/go-bitswap/message/pb" + wl "gitlab.dms3.io/dms3/go-bitswap/wantlist" + blocks "gitlab.dms3.io/dms3/go-block-format" + cid "gitlab.dms3.io/dms3/go-cid" + bstore "gitlab.dms3.io/dms3/go-dms3-blockstore" + logging "gitlab.dms3.io/dms3/go-log" + "gitlab.dms3.io/dms3/go-peertaskqueue" + "gitlab.dms3.io/dms3/go-peertaskqueue/peertask" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +// TODO consider taking responsibility for other types of requests. For +// example, there could be a |cancelQueue| for all of the cancellation +// messages that need to go out. There could also be a |wantlistQueue| for +// the local peer's wantlists. Alternatively, these could all be bundled +// into a single, intelligent global queue that efficiently +// batches/combines and takes all of these into consideration. +// +// Right now, messages go onto the network for four reasons: +// 1. an initial `sendwantlist` message to a provider of the first key in a +// request +// 2. a periodic full sweep of `sendwantlist` messages to all providers +// 3. upon receipt of blocks, a `cancel` message to all peers +// 4. draining the priority queue of `blockrequests` from peers +// +// Presently, only `blockrequests` are handled by the decision engine. +// However, there is an opportunity to give it more responsibility! If the +// decision engine is given responsibility for all of the others, it can +// intelligently decide how to combine requests efficiently. 
+// +// Some examples of what would be possible: +// +// * when sending out the wantlists, include `cancel` requests +// * when handling `blockrequests`, include `sendwantlist` and `cancel` as +// appropriate +// * when handling `cancel`, if we recently received a wanted block from a +// peer, include a partial wantlist that contains a few other high priority +// blocks +// +// In a sense, if we treat the decision engine as a black box, it could do +// whatever it sees fit to produce desired outcomes (get wanted keys +// quickly, maintain good relationships with peers, etc). + +var log = logging.Logger("engine") + +const ( + // outboxChanBuffer must be 0 to prevent stale messages from being sent + outboxChanBuffer = 0 + // targetMessageSize is the ideal size of the batched payload. We try to + // pop this much data off the request queue, but it may be a little more + // or less depending on what's in the queue. + targetMessageSize = 16 * 1024 + // tagFormat is the tag given to peers associated an engine + tagFormat = "bs-engine-%s-%s" + + // queuedTagWeight is the default weight for peers that have work queued + // on their behalf. + queuedTagWeight = 10 + + // maxBlockSizeReplaceHasWithBlock is the maximum size of the block in + // bytes up to which we will replace a want-have with a want-block + maxBlockSizeReplaceHasWithBlock = 1024 + + // Number of concurrent workers that pull tasks off the request queue + taskWorkerCount = 8 +) + +// Envelope contains a message for a Peer. +type Envelope struct { + // Peer is the intended recipient. + Peer peer.ID + + // Message is the payload. + Message bsmsg.BitSwapMessage + + // A callback to notify the decision queue that the task is complete + Sent func() +} + +// PeerTagger covers the methods on the connection manager used by the decision +// engine to tag peers +type PeerTagger interface { + TagPeer(peer.ID, string, int) + UntagPeer(p peer.ID, tag string) +} + +// Assigns a specific score to a peer +type ScorePeerFunc func(peer.ID, int) + +// ScoreLedger is an external ledger dealing with peer scores. +type ScoreLedger interface { + // Returns aggregated data communication with a given peer. + GetReceipt(p peer.ID) *Receipt + // Increments the sent counter for the given peer. + AddToSentBytes(p peer.ID, n int) + // Increments the received counter for the given peer. + AddToReceivedBytes(p peer.ID, n int) + // PeerConnected should be called when a new peer connects, + // meaning the ledger should open accounting. + PeerConnected(p peer.ID) + // PeerDisconnected should be called when a peer disconnects to + // clean up the accounting. + PeerDisconnected(p peer.ID) + // Starts the ledger sampling process. + Start(scorePeer ScorePeerFunc) + // Stops the sampling process. + Stop() +} + +// Engine manages sending requested blocks to peers. +type Engine struct { + // peerRequestQueue is a priority queue of requests received from peers. + // Requests are popped from the queue, packaged up, and placed in the + // outbox. + peerRequestQueue *peertaskqueue.PeerTaskQueue + + // FIXME it's a bit odd for the client and the worker to both share memory + // (both modify the peerRequestQueue) and also to communicate over the + // workSignal channel. consider sending requests over the channel and + // allowing the worker to have exclusive access to the peerRequestQueue. In + // that case, no lock would be required. + workSignal chan struct{} + + // outbox contains outgoing messages to peers. 
This is owned by the + // taskWorker goroutine + outbox chan (<-chan *Envelope) + + bsm *blockstoreManager + + peerTagger PeerTagger + + tagQueued, tagUseful string + + lock sync.RWMutex // protects the fields immediatly below + + // ledgerMap lists block-related Ledgers by their Partner key. + ledgerMap map[peer.ID]*ledger + + // an external ledger dealing with peer scores + scoreLedger ScoreLedger + + ticker *time.Ticker + + taskWorkerLock sync.Mutex + taskWorkerCount int + + // maxBlockSizeReplaceHasWithBlock is the maximum size of the block in + // bytes up to which we will replace a want-have with a want-block + maxBlockSizeReplaceHasWithBlock int + + sendDontHaves bool + + self peer.ID +} + +// NewEngine creates a new block sending engine for the given block store +func NewEngine(bs bstore.Blockstore, bstoreWorkerCount int, peerTagger PeerTagger, self peer.ID, scoreLedger ScoreLedger) *Engine { + return newEngine(bs, bstoreWorkerCount, peerTagger, self, maxBlockSizeReplaceHasWithBlock, scoreLedger) +} + +// This constructor is used by the tests +func newEngine(bs bstore.Blockstore, bstoreWorkerCount int, peerTagger PeerTagger, self peer.ID, + maxReplaceSize int, scoreLedger ScoreLedger) *Engine { + + if scoreLedger == nil { + scoreLedger = NewDefaultScoreLedger() + } + + e := &Engine{ + ledgerMap: make(map[peer.ID]*ledger), + scoreLedger: scoreLedger, + bsm: newBlockstoreManager(bs, bstoreWorkerCount), + peerTagger: peerTagger, + outbox: make(chan (<-chan *Envelope), outboxChanBuffer), + workSignal: make(chan struct{}, 1), + ticker: time.NewTicker(time.Millisecond * 100), + maxBlockSizeReplaceHasWithBlock: maxReplaceSize, + taskWorkerCount: taskWorkerCount, + sendDontHaves: true, + self: self, + } + e.tagQueued = fmt.Sprintf(tagFormat, "queued", uuid.New().String()) + e.tagUseful = fmt.Sprintf(tagFormat, "useful", uuid.New().String()) + e.peerRequestQueue = peertaskqueue.New( + peertaskqueue.OnPeerAddedHook(e.onPeerAdded), + peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved), + peertaskqueue.TaskMerger(newTaskMerger()), + peertaskqueue.IgnoreFreezing(true)) + return e +} + +// SetSendDontHaves indicates what to do when the engine receives a want-block +// for a block that is not in the blockstore. Either +// - Send a DONT_HAVE message +// - Simply don't respond +// Older versions of Bitswap did not respond, so this allows us to simulate +// those older versions for testing. +func (e *Engine) SetSendDontHaves(send bool) { + e.sendDontHaves = send +} + +// Starts the score ledger. Before start the function checks and, +// if it is unset, initializes the scoreLedger with the default +// implementation. 
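// As an illustrative sketch (not part of this change), a caller that wants to
// disable scoring entirely could pass NewEngine a no-op ScoreLedger; the type
// name noopScoreLedger is hypothetical and Receipt is assumed to be the struct
// defined in the existing ledger code:
//
//	type noopScoreLedger struct{}
//
//	func (noopScoreLedger) GetReceipt(peer.ID) *Receipt     { return &Receipt{} }
//	func (noopScoreLedger) AddToSentBytes(peer.ID, int)     {}
//	func (noopScoreLedger) AddToReceivedBytes(peer.ID, int) {}
//	func (noopScoreLedger) PeerConnected(peer.ID)           {}
//	func (noopScoreLedger) PeerDisconnected(peer.ID)        {}
//	func (noopScoreLedger) Start(ScorePeerFunc)             {}
//	func (noopScoreLedger) Stop()                           {}
//
//	// usage: NewEngine(bs, workers, tagger, self, noopScoreLedger{})
//
// Passing nil instead selects NewDefaultScoreLedger, as in newEngine above.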
+func (e *Engine) startScoreLedger(px process.Process) { + e.scoreLedger.Start(func(p peer.ID, score int) { + if score == 0 { + e.peerTagger.UntagPeer(p, e.tagUseful) + } else { + e.peerTagger.TagPeer(p, e.tagUseful, score) + } + }) + px.Go(func(ppx process.Process) { + <-ppx.Closing() + e.scoreLedger.Stop() + }) +} + +// Start up workers to handle requests from other nodes for the data on this node +func (e *Engine) StartWorkers(ctx context.Context, px process.Process) { + // Start up blockstore manager + e.bsm.start(px) + e.startScoreLedger(px) + + for i := 0; i < e.taskWorkerCount; i++ { + px.Go(func(px process.Process) { + e.taskWorker(ctx) + }) + } +} + +func (e *Engine) onPeerAdded(p peer.ID) { + e.peerTagger.TagPeer(p, e.tagQueued, queuedTagWeight) +} + +func (e *Engine) onPeerRemoved(p peer.ID) { + e.peerTagger.UntagPeer(p, e.tagQueued) +} + +// WantlistForPeer returns the list of keys that the given peer has asked for +func (e *Engine) WantlistForPeer(p peer.ID) []wl.Entry { + partner := e.findOrCreate(p) + + partner.lk.Lock() + entries := partner.wantList.Entries() + partner.lk.Unlock() + + wl.SortEntries(entries) + + return entries +} + +// LedgerForPeer returns aggregated data communication with a given peer. +func (e *Engine) LedgerForPeer(p peer.ID) *Receipt { + return e.scoreLedger.GetReceipt(p) +} + +// Each taskWorker pulls items off the request queue up to the maximum size +// and adds them to an envelope that is passed off to the bitswap workers, +// which send the message to the network. +func (e *Engine) taskWorker(ctx context.Context) { + defer e.taskWorkerExit() + for { + oneTimeUse := make(chan *Envelope, 1) // buffer to prevent blocking + select { + case <-ctx.Done(): + return + case e.outbox <- oneTimeUse: + } + // receiver is ready for an outoing envelope. let's prepare one. first, + // we must acquire a task from the PQ... + envelope, err := e.nextEnvelope(ctx) + if err != nil { + close(oneTimeUse) + return // ctx cancelled + } + oneTimeUse <- envelope // buffered. won't block + close(oneTimeUse) + } +} + +// taskWorkerExit handles cleanup of task workers +func (e *Engine) taskWorkerExit() { + e.taskWorkerLock.Lock() + defer e.taskWorkerLock.Unlock() + + e.taskWorkerCount-- + if e.taskWorkerCount == 0 { + close(e.outbox) + } +} + +// nextEnvelope runs in the taskWorker goroutine. Returns an error if the +// context is cancelled before the next Envelope can be created. +func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { + for { + // Pop some tasks off the request queue + p, nextTasks, pendingBytes := e.peerRequestQueue.PopTasks(targetMessageSize) + for len(nextTasks) == 0 { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-e.workSignal: + p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(targetMessageSize) + case <-e.ticker.C: + // When a task is cancelled, the queue may be "frozen" for a + // period of time. We periodically "thaw" the queue to make + // sure it doesn't get stuck in a frozen state. 
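			// (The ticker is created in newEngine with a 100ms period, so a
			// frozen queue is re-checked at least that often even when no new
			// work signal arrives.)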
+ e.peerRequestQueue.ThawRound() + p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(targetMessageSize) + } + } + + // Create a new message + msg := bsmsg.New(false) + + log.Debugw("Bitswap process tasks", "local", e.self, "taskCount", len(nextTasks)) + + // Amount of data in the request queue still waiting to be popped + msg.SetPendingBytes(int32(pendingBytes)) + + // Split out want-blocks, want-haves and DONT_HAVEs + blockCids := make([]cid.Cid, 0, len(nextTasks)) + blockTasks := make(map[cid.Cid]*taskData, len(nextTasks)) + for _, t := range nextTasks { + c := t.Topic.(cid.Cid) + td := t.Data.(*taskData) + if td.HaveBlock { + if td.IsWantBlock { + blockCids = append(blockCids, c) + blockTasks[c] = td + } else { + // Add HAVES to the message + msg.AddHave(c) + } + } else { + // Add DONT_HAVEs to the message + msg.AddDontHave(c) + } + } + + // Fetch blocks from datastore + blks, err := e.bsm.getBlocks(ctx, blockCids) + if err != nil { + // we're dropping the envelope but that's not an issue in practice. + return nil, err + } + + for c, t := range blockTasks { + blk := blks[c] + // If the block was not found (it has been removed) + if blk == nil { + // If the client requested DONT_HAVE, add DONT_HAVE to the message + if t.SendDontHave { + msg.AddDontHave(c) + } + } else { + // Add the block to the message + // log.Debugf(" make evlp %s->%s block: %s (%d bytes)", e.self, p, c, len(blk.RawData())) + msg.AddBlock(blk) + } + } + + // If there's nothing in the message, bail out + if msg.Empty() { + e.peerRequestQueue.TasksDone(p, nextTasks...) + continue + } + + log.Debugw("Bitswap engine -> msg", "local", e.self, "to", p, "blockCount", len(msg.Blocks()), "presenceCount", len(msg.BlockPresences()), "size", msg.Size()) + return &Envelope{ + Peer: p, + Message: msg, + Sent: func() { + // Once the message has been sent, signal the request queue so + // it can be cleared from the queue + e.peerRequestQueue.TasksDone(p, nextTasks...) + + // Signal the worker to check for more work + e.signalNewWork() + }, + }, nil + } +} + +// Outbox returns a channel of one-time use Envelope channels. +func (e *Engine) Outbox() <-chan (<-chan *Envelope) { + return e.outbox +} + +// Peers returns a slice of Peers with whom the local node has active sessions. +func (e *Engine) Peers() []peer.ID { + e.lock.RLock() + defer e.lock.RUnlock() + + response := make([]peer.ID, 0, len(e.ledgerMap)) + + for _, ledger := range e.ledgerMap { + response = append(response, ledger.Partner) + } + return response +} + +// MessageReceived is called when a message is received from a remote peer. 
+// For each item in the wantlist, add a want-have or want-block entry to the +// request queue (this is later popped off by the workerTasks) +func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) { + entries := m.Wantlist() + + if len(entries) > 0 { + log.Debugw("Bitswap engine <- msg", "local", e.self, "from", p, "entryCount", len(entries)) + for _, et := range entries { + if !et.Cancel { + if et.WantType == pb.Message_Wantlist_Have { + log.Debugw("Bitswap engine <- want-have", "local", e.self, "from", p, "cid", et.Cid) + } else { + log.Debugw("Bitswap engine <- want-block", "local", e.self, "from", p, "cid", et.Cid) + } + } + } + } + + if m.Empty() { + log.Infof("received empty message from %s", p) + } + + newWorkExists := false + defer func() { + if newWorkExists { + e.signalNewWork() + } + }() + + // Get block sizes + wants, cancels := e.splitWantsCancels(entries) + wantKs := cid.NewSet() + for _, entry := range wants { + wantKs.Add(entry.Cid) + } + blockSizes, err := e.bsm.getBlockSizes(ctx, wantKs.Keys()) + if err != nil { + log.Info("aborting message processing", err) + return + } + + // Get the ledger for the peer + l := e.findOrCreate(p) + l.lk.Lock() + defer l.lk.Unlock() + + // If the peer sent a full wantlist, replace the ledger's wantlist + if m.Full() { + l.wantList = wl.New() + } + + var activeEntries []peertask.Task + + // Remove cancelled blocks from the queue + for _, entry := range cancels { + log.Debugw("Bitswap engine <- cancel", "local", e.self, "from", p, "cid", entry.Cid) + if l.CancelWant(entry.Cid) { + e.peerRequestQueue.Remove(entry.Cid, p) + } + } + + // For each want-have / want-block + for _, entry := range wants { + c := entry.Cid + blockSize, found := blockSizes[entry.Cid] + + // Add each want-have / want-block to the ledger + l.Wants(c, entry.Priority, entry.WantType) + + // If the block was not found + if !found { + log.Debugw("Bitswap engine: block not found", "local", e.self, "from", p, "cid", entry.Cid, "sendDontHave", entry.SendDontHave) + + // Only add the task to the queue if the requester wants a DONT_HAVE + if e.sendDontHaves && entry.SendDontHave { + newWorkExists = true + isWantBlock := false + if entry.WantType == pb.Message_Wantlist_Block { + isWantBlock = true + } + + activeEntries = append(activeEntries, peertask.Task{ + Topic: c, + Priority: int(entry.Priority), + Work: bsmsg.BlockPresenceSize(c), + Data: &taskData{ + BlockSize: 0, + HaveBlock: false, + IsWantBlock: isWantBlock, + SendDontHave: entry.SendDontHave, + }, + }) + } + } else { + // The block was found, add it to the queue + newWorkExists = true + + isWantBlock := e.sendAsBlock(entry.WantType, blockSize) + + log.Debugw("Bitswap engine: block found", "local", e.self, "from", p, "cid", entry.Cid, "isWantBlock", isWantBlock) + + // entrySize is the amount of space the entry takes up in the + // message we send to the recipient. If we're sending a block, the + // entrySize is the size of the block. Otherwise it's the size of + // a block presence entry. + entrySize := blockSize + if !isWantBlock { + entrySize = bsmsg.BlockPresenceSize(c) + } + activeEntries = append(activeEntries, peertask.Task{ + Topic: c, + Priority: int(entry.Priority), + Work: entrySize, + Data: &taskData{ + BlockSize: blockSize, + HaveBlock: true, + IsWantBlock: isWantBlock, + SendDontHave: entry.SendDontHave, + }, + }) + } + } + + // Push entries onto the request queue + if len(activeEntries) > 0 { + e.peerRequestQueue.PushTasks(p, activeEntries...) 
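		// As a concrete illustration of the sizing logic above (block sizes are
		// hypothetical; 1024 is the maxBlockSizeReplaceHasWithBlock default):
		//
		//	e.sendAsBlock(pb.Message_Wantlist_Have, 500)    // true: small block sent whole, entrySize = 500
		//	e.sendAsBlock(pb.Message_Wantlist_Have, 16384)  // false: a HAVE is queued, entrySize = BlockPresenceSize(c)
		//	e.sendAsBlock(pb.Message_Wantlist_Block, 16384) // true: want-block always gets the block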
+ } +} + +// Split the want-have / want-block entries from the cancel entries +func (e *Engine) splitWantsCancels(es []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Entry) { + wants := make([]bsmsg.Entry, 0, len(es)) + cancels := make([]bsmsg.Entry, 0, len(es)) + for _, et := range es { + if et.Cancel { + cancels = append(cancels, et) + } else { + wants = append(wants, et) + } + } + return wants, cancels +} + +// ReceiveFrom is called when new blocks are received and added to the block +// store, meaning there may be peers who want those blocks, so we should send +// the blocks to them. +// +// This function also updates the receive side of the ledger. +func (e *Engine) ReceiveFrom(from peer.ID, blks []blocks.Block, haves []cid.Cid) { + if len(blks) == 0 { + return + } + + if from != "" { + l := e.findOrCreate(from) + l.lk.Lock() + + // Record how many bytes were received in the ledger + for _, blk := range blks { + log.Debugw("Bitswap engine <- block", "local", e.self, "from", from, "cid", blk.Cid(), "size", len(blk.RawData())) + e.scoreLedger.AddToReceivedBytes(l.Partner, len(blk.RawData())) + } + + l.lk.Unlock() + } + + // Get the size of each block + blockSizes := make(map[cid.Cid]int, len(blks)) + for _, blk := range blks { + blockSizes[blk.Cid()] = len(blk.RawData()) + } + + // Check each peer to see if it wants one of the blocks we received + work := false + e.lock.RLock() + + for _, l := range e.ledgerMap { + l.lk.RLock() + + for _, b := range blks { + k := b.Cid() + + if entry, ok := l.WantListContains(k); ok { + work = true + + blockSize := blockSizes[k] + isWantBlock := e.sendAsBlock(entry.WantType, blockSize) + + entrySize := blockSize + if !isWantBlock { + entrySize = bsmsg.BlockPresenceSize(k) + } + + e.peerRequestQueue.PushTasks(l.Partner, peertask.Task{ + Topic: entry.Cid, + Priority: int(entry.Priority), + Work: entrySize, + Data: &taskData{ + BlockSize: blockSize, + HaveBlock: true, + IsWantBlock: isWantBlock, + SendDontHave: false, + }, + }) + } + } + l.lk.RUnlock() + } + e.lock.RUnlock() + + if work { + e.signalNewWork() + } +} + +// TODO add contents of m.WantList() to my local wantlist? NB: could introduce +// race conditions where I send a message, but MessageSent gets handled after +// MessageReceived. The information in the local wantlist could become +// inconsistent. Would need to ensure that Sends and acknowledgement of the +// send happen atomically + +// MessageSent is called when a message has successfully been sent out, to record +// changes. +func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) { + l := e.findOrCreate(p) + l.lk.Lock() + defer l.lk.Unlock() + + // Remove sent blocks from the want list for the peer + for _, block := range m.Blocks() { + e.scoreLedger.AddToSentBytes(l.Partner, len(block.RawData())) + l.wantList.RemoveType(block.Cid(), pb.Message_Wantlist_Block) + } + + // Remove sent block presences from the want list for the peer + for _, bp := range m.BlockPresences() { + // Don't record sent data. We reserve that for data blocks. + if bp.Type == pb.Message_Have { + l.wantList.RemoveType(bp.Cid, pb.Message_Wantlist_Have) + } + } +} + +// PeerConnected is called when a new peer connects, meaning we should start +// sending blocks. +func (e *Engine) PeerConnected(p peer.ID) { + e.lock.Lock() + defer e.lock.Unlock() + + _, ok := e.ledgerMap[p] + if !ok { + e.ledgerMap[p] = newLedger(p) + } + + e.scoreLedger.PeerConnected(p) +} + +// PeerDisconnected is called when a peer disconnects. 
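// A typical connection lifecycle, as seen by the engine (illustrative only):
//
//	e.PeerConnected(p)             // allocates an empty ledger for p
//	e.MessageReceived(ctx, p, msg) // p's wants are recorded and queued
//	e.PeerDisconnected(p)          // p's ledger and score accounting are dropped
//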
+func (e *Engine) PeerDisconnected(p peer.ID) { + e.lock.Lock() + defer e.lock.Unlock() + + delete(e.ledgerMap, p) + + e.scoreLedger.PeerDisconnected(p) +} + +// If the want is a want-have, and it's below a certain size, send the full +// block (instead of sending a HAVE) +func (e *Engine) sendAsBlock(wantType pb.Message_Wantlist_WantType, blockSize int) bool { + isWantBlock := wantType == pb.Message_Wantlist_Block + return isWantBlock || blockSize <= e.maxBlockSizeReplaceHasWithBlock +} + +func (e *Engine) numBytesSentTo(p peer.ID) uint64 { + return e.LedgerForPeer(p).Sent +} + +func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 { + return e.LedgerForPeer(p).Recv +} + +// ledger lazily instantiates a ledger +func (e *Engine) findOrCreate(p peer.ID) *ledger { + // Take a read lock (as it's less expensive) to check if we have a ledger + // for the peer + e.lock.RLock() + l, ok := e.ledgerMap[p] + e.lock.RUnlock() + if ok { + return l + } + + // There's no ledger, so take a write lock, then check again and create the + // ledger if necessary + e.lock.Lock() + defer e.lock.Unlock() + l, ok = e.ledgerMap[p] + if !ok { + l = newLedger(p) + e.ledgerMap[p] = l + } + return l +} + +func (e *Engine) signalNewWork() { + // Signal task generation to restart (if stopped!) + select { + case e.workSignal <- struct{}{}: + default: + } +} diff --git a/internal/decision/engine_test.go b/internal/decision/engine_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4587c6309115fbab6dc5c1ce0e497b7a74563973 --- /dev/null +++ b/internal/decision/engine_test.go @@ -0,0 +1,1197 @@ +package decision + +import ( + "bytes" + "context" + "errors" + "fmt" + "strings" + "sync" + "testing" + "time" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + message "gitlab.dms3.io/dms3/go-bitswap/message" + pb "gitlab.dms3.io/dms3/go-bitswap/message/pb" + + process "github.com/jbenet/goprocess" + blocks "gitlab.dms3.io/dms3/go-block-format" + cid "gitlab.dms3.io/dms3/go-cid" + ds "gitlab.dms3.io/dms3/go-datastore" + dssync "gitlab.dms3.io/dms3/go-datastore/sync" + blockstore "gitlab.dms3.io/dms3/go-dms3-blockstore" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" + p2ptest "gitlab.dms3.io/p2p/go-p2p-core/test" +) + +type peerTag struct { + done chan struct{} + peers map[peer.ID]int +} + +type fakePeerTagger struct { + lk sync.Mutex + tags map[string]*peerTag +} + +func (fpt *fakePeerTagger) TagPeer(p peer.ID, tag string, n int) { + fpt.lk.Lock() + defer fpt.lk.Unlock() + if fpt.tags == nil { + fpt.tags = make(map[string]*peerTag, 1) + } + pt, ok := fpt.tags[tag] + if !ok { + pt = &peerTag{peers: make(map[peer.ID]int, 1), done: make(chan struct{})} + fpt.tags[tag] = pt + } + pt.peers[p] = n +} + +func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) { + fpt.lk.Lock() + defer fpt.lk.Unlock() + pt := fpt.tags[tag] + if pt == nil { + return + } + delete(pt.peers, p) + if len(pt.peers) == 0 { + close(pt.done) + delete(fpt.tags, tag) + } +} + +func (fpt *fakePeerTagger) count(tag string) int { + fpt.lk.Lock() + defer fpt.lk.Unlock() + if pt, ok := fpt.tags[tag]; ok { + return len(pt.peers) + } + return 0 +} + +func (fpt *fakePeerTagger) wait(tag string) { + fpt.lk.Lock() + pt := fpt.tags[tag] + if pt == nil { + fpt.lk.Unlock() + return + } + doneCh := pt.done + fpt.lk.Unlock() + <-doneCh +} + +type engineSet struct { + PeerTagger *fakePeerTagger + Peer peer.ID + Engine *Engine + Blockstore blockstore.Blockstore +} + +func newTestEngine(ctx context.Context, idStr string) engineSet { + return 
newTestEngineWithSampling(ctx, idStr, shortTerm, nil) +} + +func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInterval time.Duration, sampleCh chan struct{}) engineSet { + fpt := &fakePeerTagger{} + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + e := newEngine(bs, 4, fpt, "localhost", 0, NewTestScoreLedger(peerSampleInterval, sampleCh)) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + return engineSet{ + Peer: peer.ID(idStr), + //Strategy: New(true), + PeerTagger: fpt, + Blockstore: bs, + Engine: e, + } +} + +func TestConsistentAccounting(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sender := newTestEngine(ctx, "Ernie") + receiver := newTestEngine(ctx, "Bert") + + // Send messages from Ernie to Bert + for i := 0; i < 1000; i++ { + + m := message.New(false) + content := []string{"this", "is", "message", "i"} + m.AddBlock(blocks.NewBlock([]byte(strings.Join(content, " ")))) + + sender.Engine.MessageSent(receiver.Peer, m) + receiver.Engine.MessageReceived(ctx, sender.Peer, m) + receiver.Engine.ReceiveFrom(sender.Peer, m.Blocks(), nil) + } + + // Ensure sender records the change + if sender.Engine.numBytesSentTo(receiver.Peer) == 0 { + t.Fatal("Sent bytes were not recorded") + } + + // Ensure sender and receiver have the same values + if sender.Engine.numBytesSentTo(receiver.Peer) != receiver.Engine.numBytesReceivedFrom(sender.Peer) { + t.Fatal("Inconsistent book-keeping. Strategies don't agree") + } + + // Ensure sender didn't record receving anything. And that the receiver + // didn't record sending anything + if receiver.Engine.numBytesSentTo(sender.Peer) != 0 || sender.Engine.numBytesReceivedFrom(receiver.Peer) != 0 { + t.Fatal("Bert didn't send bytes to Ernie") + } +} + +func TestPeerIsAddedToPeersWhenMessageReceivedOrSent(t *testing.T) { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sanfrancisco := newTestEngine(ctx, "sf") + seattle := newTestEngine(ctx, "sea") + + m := message.New(true) + + sanfrancisco.Engine.MessageSent(seattle.Peer, m) + seattle.Engine.MessageReceived(ctx, sanfrancisco.Peer, m) + + if seattle.Peer == sanfrancisco.Peer { + t.Fatal("Sanity Check: Peers have same Key!") + } + + if !peerIsPartner(seattle.Peer, sanfrancisco.Engine) { + t.Fatal("Peer wasn't added as a Partner") + } + + if !peerIsPartner(sanfrancisco.Peer, seattle.Engine) { + t.Fatal("Peer wasn't added as a Partner") + } + + seattle.Engine.PeerDisconnected(sanfrancisco.Peer) + if peerIsPartner(sanfrancisco.Peer, seattle.Engine) { + t.Fatal("expected peer to be removed") + } +} + +func peerIsPartner(p peer.ID, e *Engine) bool { + for _, partner := range e.Peers() { + if partner == p { + return true + } + } + return false +} + +func TestOutboxClosedWhenEngineClosed(t *testing.T) { + t.SkipNow() // TODO implement *Engine.Close + e := newEngine(blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + var wg sync.WaitGroup + wg.Add(1) + go func() { + for nextEnvelope := range e.Outbox() { + <-nextEnvelope + } + wg.Done() + }() + // e.Close() + wg.Wait() + if _, ok := <-e.Outbox(); ok { + t.Fatal("channel should be closed") + } +} + +func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { + alphabet := "abcdefghijklmnopqrstuvwxyz" + vowels := "aeiou" + + bs := 
blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + for _, letter := range strings.Split(alphabet, "") { + block := blocks.NewBlock([]byte(letter)) + if err := bs.Put(block); err != nil { + t.Fatal(err) + } + } + + partner := p2ptest.RandPeerIDFatal(t) + // partnerWantBlocks(e, vowels, partner) + + type testCaseEntry struct { + wantBlks string + wantHaves string + sendDontHave bool + } + + type testCaseExp struct { + blks string + haves string + dontHaves string + } + + type testCase struct { + only bool + wls []testCaseEntry + exp []testCaseExp + } + + testCases := []testCase{ + // Just send want-blocks + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: vowels, + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: vowels, + }, + }, + }, + + // Send want-blocks and want-haves + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: vowels, + wantHaves: "fgh", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: vowels, + haves: "fgh", + }, + }, + }, + + // Send want-blocks and want-haves, with some want-haves that are not + // present, but without requesting DONT_HAVES + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: vowels, + wantHaves: "fgh123", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: vowels, + haves: "fgh", + }, + }, + }, + + // Send want-blocks and want-haves, with some want-haves that are not + // present, and request DONT_HAVES + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: vowels, + wantHaves: "fgh123", + sendDontHave: true, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: vowels, + haves: "fgh", + dontHaves: "123", + }, + }, + }, + + // Send want-blocks and want-haves, with some want-blocks and want-haves that are not + // present, but without requesting DONT_HAVES + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "aeiou123", + wantHaves: "fgh456", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: "aeiou", + haves: "fgh", + dontHaves: "", + }, + }, + }, + + // Send want-blocks and want-haves, with some want-blocks and want-haves that are not + // present, and request DONT_HAVES + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "aeiou123", + wantHaves: "fgh456", + sendDontHave: true, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: "aeiou", + haves: "fgh", + dontHaves: "123456", + }, + }, + }, + + // Send repeated want-blocks + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "ae", + sendDontHave: false, + }, + testCaseEntry{ + wantBlks: "io", + sendDontHave: false, + }, + testCaseEntry{ + wantBlks: "u", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: "aeiou", + }, + }, + }, + + // Send repeated want-blocks and want-haves + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "ae", + wantHaves: "jk", + sendDontHave: false, + }, + testCaseEntry{ + wantBlks: "io", + wantHaves: "lm", + sendDontHave: false, + }, + testCaseEntry{ + wantBlks: "u", + sendDontHave: false, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: "aeiou", + haves: "jklm", + }, + }, + }, + + // Send repeated want-blocks and want-haves, with some want-blocks and want-haves that are not + // present, and request DONT_HAVES + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "ae12", + wantHaves: "jk5", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "io34", + wantHaves: "lm", + sendDontHave: true, + }, + 
testCaseEntry{ + wantBlks: "u", + wantHaves: "6", + sendDontHave: true, + }, + }, + exp: []testCaseExp{ + testCaseExp{ + blks: "aeiou", + haves: "jklm", + dontHaves: "123456", + }, + }, + }, + + // Send want-block then want-have for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + }, + // want-have should be ignored because there was already a + // want-block for the same CID in the queue + exp: []testCaseExp{ + testCaseExp{ + blks: "a", + }, + }, + }, + + // Send want-have then want-block for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantHaves: "b", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "b", + sendDontHave: true, + }, + }, + // want-block should overwrite existing want-have + exp: []testCaseExp{ + testCaseExp{ + blks: "b", + }, + }, + }, + + // Send want-block then want-block for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + }, + // second want-block should be ignored + exp: []testCaseExp{ + testCaseExp{ + blks: "a", + }, + }, + }, + + // Send want-have then want-have for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + }, + // second want-have should be ignored + exp: []testCaseExp{ + testCaseExp{ + haves: "a", + }, + }, + }, + } + + var onlyTestCases []testCase + for _, testCase := range testCases { + if testCase.only { + onlyTestCases = append(onlyTestCases, testCase) + } + } + if len(onlyTestCases) > 0 { + testCases = onlyTestCases + } + + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + for i, testCase := range testCases { + t.Logf("Test case %d:", i) + for _, wl := range testCase.wls { + t.Logf(" want-blocks '%s' / want-haves '%s' / sendDontHave %t", + wl.wantBlks, wl.wantHaves, wl.sendDontHave) + wantBlks := strings.Split(wl.wantBlks, "") + wantHaves := strings.Split(wl.wantHaves, "") + partnerWantBlocksHaves(e, wantBlks, wantHaves, wl.sendDontHave, partner) + } + + for _, exp := range testCase.exp { + expBlks := strings.Split(exp.blks, "") + expHaves := strings.Split(exp.haves, "") + expDontHaves := strings.Split(exp.dontHaves, "") + + next := <-e.Outbox() + env := <-next + err := checkOutput(t, e, env, expBlks, expHaves, expDontHaves) + if err != nil { + t.Fatal(err) + } + env.Sent() + } + } +} + +func TestPartnerWantHaveWantBlockActive(t *testing.T) { + alphabet := "abcdefghijklmnopqrstuvwxyz" + + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + for _, letter := range strings.Split(alphabet, "") { + block := blocks.NewBlock([]byte(letter)) + if err := bs.Put(block); err != nil { + t.Fatal(err) + } + } + + partner := p2ptest.RandPeerIDFatal(t) + + type testCaseEntry struct { + wantBlks string + wantHaves string + sendDontHave bool + } + + type testCaseExp struct { + blks string + haves string + dontHaves string + } + + type testCase struct { + only bool + wls []testCaseEntry + exp []testCaseExp + } + + testCases := []testCase{ + // Send want-block then want-have for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantHaves: "a", + sendDontHave: 
true, + }, + }, + // want-have should be ignored because there was already a + // want-block for the same CID in the queue + exp: []testCaseExp{ + testCaseExp{ + blks: "a", + }, + }, + }, + + // Send want-have then want-block for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantHaves: "b", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "b", + sendDontHave: true, + }, + }, + // want-have is active when want-block is added, so want-have + // should get sent, then want-block + exp: []testCaseExp{ + testCaseExp{ + haves: "b", + }, + testCaseExp{ + blks: "b", + }, + }, + }, + + // Send want-block then want-block for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantBlks: "a", + sendDontHave: true, + }, + }, + // second want-block should be ignored + exp: []testCaseExp{ + testCaseExp{ + blks: "a", + }, + }, + }, + + // Send want-have then want-have for same CID + testCase{ + wls: []testCaseEntry{ + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + testCaseEntry{ + wantHaves: "a", + sendDontHave: true, + }, + }, + // second want-have should be ignored + exp: []testCaseExp{ + testCaseExp{ + haves: "a", + }, + }, + }, + } + + var onlyTestCases []testCase + for _, testCase := range testCases { + if testCase.only { + onlyTestCases = append(onlyTestCases, testCase) + } + } + if len(onlyTestCases) > 0 { + testCases = onlyTestCases + } + + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + + var next envChan + for i, testCase := range testCases { + envs := make([]*Envelope, 0) + + t.Logf("Test case %d:", i) + for _, wl := range testCase.wls { + t.Logf(" want-blocks '%s' / want-haves '%s' / sendDontHave %t", + wl.wantBlks, wl.wantHaves, wl.sendDontHave) + wantBlks := strings.Split(wl.wantBlks, "") + wantHaves := strings.Split(wl.wantHaves, "") + partnerWantBlocksHaves(e, wantBlks, wantHaves, wl.sendDontHave, partner) + + var env *Envelope + next, env = getNextEnvelope(e, next, 5*time.Millisecond) + if env != nil { + envs = append(envs, env) + } + } + + if len(envs) != len(testCase.exp) { + t.Fatalf("Expected %d envelopes but received %d", len(testCase.exp), len(envs)) + } + + for i, exp := range testCase.exp { + expBlks := strings.Split(exp.blks, "") + expHaves := strings.Split(exp.haves, "") + expDontHaves := strings.Split(exp.dontHaves, "") + + err := checkOutput(t, e, envs[i], expBlks, expHaves, expDontHaves) + if err != nil { + t.Fatal(err) + } + envs[i].Sent() + } + } +} + +func checkOutput(t *testing.T, e *Engine, envelope *Envelope, expBlks []string, expHaves []string, expDontHaves []string) error { + blks := envelope.Message.Blocks() + presences := envelope.Message.BlockPresences() + + // Verify payload message length + if len(blks) != len(expBlks) { + blkDiff := formatBlocksDiff(blks, expBlks) + msg := fmt.Sprintf("Received %d blocks. Expected %d blocks:\n%s", len(blks), len(expBlks), blkDiff) + return errors.New(msg) + } + + // Verify block presences message length + expPresencesCount := len(expHaves) + len(expDontHaves) + if len(presences) != expPresencesCount { + presenceDiff := formatPresencesDiff(presences, expHaves, expDontHaves) + return fmt.Errorf("Received %d BlockPresences. 
Expected %d BlockPresences:\n%s", + len(presences), expPresencesCount, presenceDiff) + } + + // Verify payload message contents + for _, k := range expBlks { + found := false + expected := blocks.NewBlock([]byte(k)) + for _, block := range blks { + if block.Cid().Equals(expected.Cid()) { + found = true + break + } + } + if !found { + return errors.New(formatBlocksDiff(blks, expBlks)) + } + } + + // Verify HAVEs + if err := checkPresence(presences, expHaves, pb.Message_Have); err != nil { + return errors.New(formatPresencesDiff(presences, expHaves, expDontHaves)) + } + + // Verify DONT_HAVEs + if err := checkPresence(presences, expDontHaves, pb.Message_DontHave); err != nil { + return errors.New(formatPresencesDiff(presences, expHaves, expDontHaves)) + } + + return nil +} + +func checkPresence(presences []message.BlockPresence, expPresence []string, presenceType pb.Message_BlockPresenceType) error { + for _, k := range expPresence { + found := false + expected := blocks.NewBlock([]byte(k)) + for _, p := range presences { + if p.Cid.Equals(expected.Cid()) { + found = true + if p.Type != presenceType { + return errors.New("type mismatch") + } + break + } + } + if !found { + return errors.New("not found") + } + } + return nil +} + +func formatBlocksDiff(blks []blocks.Block, expBlks []string) string { + var out bytes.Buffer + out.WriteString(fmt.Sprintf("Blocks (%d):\n", len(blks))) + for _, b := range blks { + out.WriteString(fmt.Sprintf(" %s: %s\n", b.Cid(), b.RawData())) + } + out.WriteString(fmt.Sprintf("Expected (%d):\n", len(expBlks))) + for _, k := range expBlks { + expected := blocks.NewBlock([]byte(k)) + out.WriteString(fmt.Sprintf(" %s: %s\n", expected.Cid(), k)) + } + return out.String() +} + +func formatPresencesDiff(presences []message.BlockPresence, expHaves []string, expDontHaves []string) string { + var out bytes.Buffer + out.WriteString(fmt.Sprintf("BlockPresences (%d):\n", len(presences))) + for _, p := range presences { + t := "HAVE" + if p.Type == pb.Message_DontHave { + t = "DONT_HAVE" + } + out.WriteString(fmt.Sprintf(" %s - %s\n", p.Cid, t)) + } + out.WriteString(fmt.Sprintf("Expected (%d):\n", len(expHaves)+len(expDontHaves))) + for _, k := range expHaves { + expected := blocks.NewBlock([]byte(k)) + out.WriteString(fmt.Sprintf(" %s: %s - HAVE\n", expected.Cid(), k)) + } + for _, k := range expDontHaves { + expected := blocks.NewBlock([]byte(k)) + out.WriteString(fmt.Sprintf(" %s: %s - DONT_HAVE\n", expected.Cid(), k)) + } + return out.String() +} + +func TestPartnerWantsThenCancels(t *testing.T) { + numRounds := 10 + if testing.Short() { + numRounds = 1 + } + alphabet := strings.Split("abcdefghijklmnopqrstuvwxyz", "") + vowels := strings.Split("aeiou", "") + + type testCase [][]string + testcases := []testCase{ + { + alphabet, vowels, + }, + { + alphabet, stringsComplement(alphabet, vowels), + alphabet[1:25], stringsComplement(alphabet[1:25], vowels), alphabet[2:25], stringsComplement(alphabet[2:25], vowels), + alphabet[3:25], stringsComplement(alphabet[3:25], vowels), alphabet[4:25], stringsComplement(alphabet[4:25], vowels), + alphabet[5:25], stringsComplement(alphabet[5:25], vowels), alphabet[6:25], stringsComplement(alphabet[6:25], vowels), + alphabet[7:25], stringsComplement(alphabet[7:25], vowels), alphabet[8:25], stringsComplement(alphabet[8:25], vowels), + alphabet[9:25], stringsComplement(alphabet[9:25], vowels), alphabet[10:25], stringsComplement(alphabet[10:25], vowels), + alphabet[11:25], stringsComplement(alphabet[11:25], vowels), alphabet[12:25], 
stringsComplement(alphabet[12:25], vowels), + alphabet[13:25], stringsComplement(alphabet[13:25], vowels), alphabet[14:25], stringsComplement(alphabet[14:25], vowels), + alphabet[15:25], stringsComplement(alphabet[15:25], vowels), alphabet[16:25], stringsComplement(alphabet[16:25], vowels), + alphabet[17:25], stringsComplement(alphabet[17:25], vowels), alphabet[18:25], stringsComplement(alphabet[18:25], vowels), + alphabet[19:25], stringsComplement(alphabet[19:25], vowels), alphabet[20:25], stringsComplement(alphabet[20:25], vowels), + alphabet[21:25], stringsComplement(alphabet[21:25], vowels), alphabet[22:25], stringsComplement(alphabet[22:25], vowels), + alphabet[23:25], stringsComplement(alphabet[23:25], vowels), alphabet[24:25], stringsComplement(alphabet[24:25], vowels), + alphabet[25:25], stringsComplement(alphabet[25:25], vowels), + }, + } + + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + for _, letter := range alphabet { + block := blocks.NewBlock([]byte(letter)) + if err := bs.Put(block); err != nil { + t.Fatal(err) + } + } + + ctx := context.Background() + for i := 0; i < numRounds; i++ { + expected := make([][]string, 0, len(testcases)) + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + for _, testcase := range testcases { + set := testcase[0] + cancels := testcase[1] + keeps := stringsComplement(set, cancels) + expected = append(expected, keeps) + + partner := p2ptest.RandPeerIDFatal(t) + + partnerWantBlocks(e, set, partner) + partnerCancels(e, cancels, partner) + } + if err := checkHandledInOrder(t, e, expected); err != nil { + t.Logf("run #%d of %d", i, numRounds) + t.Fatal(err) + } + } +} + +func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + partner := p2ptest.RandPeerIDFatal(t) + otherPeer := p2ptest.RandPeerIDFatal(t) + + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + + blks := testutil.GenerateBlocksOfSize(4, 8*1024) + msg := message.New(false) + msg.AddEntry(blks[0].Cid(), 4, pb.Message_Wantlist_Have, false) + msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, false) + msg.AddEntry(blks[2].Cid(), 2, pb.Message_Wantlist_Block, false) + msg.AddEntry(blks[3].Cid(), 1, pb.Message_Wantlist_Block, false) + e.MessageReceived(context.Background(), partner, msg) + + // Nothing in blockstore, so shouldn't get any envelope + var next envChan + next, env := getNextEnvelope(e, next, 5*time.Millisecond) + if env != nil { + t.Fatal("expected no envelope yet") + } + + if err := bs.PutMany([]blocks.Block{blks[0], blks[2]}); err != nil { + t.Fatal(err) + } + e.ReceiveFrom(otherPeer, []blocks.Block{blks[0], blks[2]}, []cid.Cid{}) + _, env = getNextEnvelope(e, next, 5*time.Millisecond) + if env == nil { + t.Fatal("expected envelope") + } + if env.Peer != partner { + t.Fatal("expected message to peer") + } + sentBlk := env.Message.Blocks() + if len(sentBlk) != 1 || !sentBlk[0].Cid().Equals(blks[2].Cid()) { + t.Fatal("expected 1 block") + } + sentHave := env.Message.BlockPresences() + if len(sentHave) != 1 || !sentHave[0].Cid.Equals(blks[0].Cid()) || sentHave[0].Type != pb.Message_Have { + t.Fatal("expected 1 HAVE") + } +} + +func TestSendDontHave(t *testing.T) { + bs := 
blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + partner := p2ptest.RandPeerIDFatal(t) + otherPeer := p2ptest.RandPeerIDFatal(t) + + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + + blks := testutil.GenerateBlocksOfSize(4, 8*1024) + msg := message.New(false) + msg.AddEntry(blks[0].Cid(), 4, pb.Message_Wantlist_Have, false) + msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, true) + msg.AddEntry(blks[2].Cid(), 2, pb.Message_Wantlist_Block, false) + msg.AddEntry(blks[3].Cid(), 1, pb.Message_Wantlist_Block, true) + e.MessageReceived(context.Background(), partner, msg) + + // Nothing in blockstore, should get DONT_HAVE for entries that wanted it + var next envChan + next, env := getNextEnvelope(e, next, 10*time.Millisecond) + if env == nil { + t.Fatal("expected envelope") + } + if env.Peer != partner { + t.Fatal("expected message to peer") + } + if len(env.Message.Blocks()) > 0 { + t.Fatal("expected no blocks") + } + sentDontHaves := env.Message.BlockPresences() + if len(sentDontHaves) != 2 { + t.Fatal("expected 2 DONT_HAVEs") + } + if !sentDontHaves[0].Cid.Equals(blks[1].Cid()) && + !sentDontHaves[1].Cid.Equals(blks[1].Cid()) { + t.Fatal("expected DONT_HAVE for want-have") + } + if !sentDontHaves[0].Cid.Equals(blks[3].Cid()) && + !sentDontHaves[1].Cid.Equals(blks[3].Cid()) { + t.Fatal("expected DONT_HAVE for want-block") + } + + // Receive all the blocks + if err := bs.PutMany(blks); err != nil { + t.Fatal(err) + } + e.ReceiveFrom(otherPeer, blks, []cid.Cid{}) + + // Envelope should contain 2 HAVEs / 2 blocks + _, env = getNextEnvelope(e, next, 10*time.Millisecond) + if env == nil { + t.Fatal("expected envelope") + } + if env.Peer != partner { + t.Fatal("expected message to peer") + } + if len(env.Message.Blocks()) != 2 { + t.Fatal("expected 2 blocks") + } + sentHave := env.Message.BlockPresences() + if len(sentHave) != 2 || sentHave[0].Type != pb.Message_Have || sentHave[1].Type != pb.Message_Have { + t.Fatal("expected 2 HAVEs") + } +} + +func TestWantlistForPeer(t *testing.T) { + bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + partner := p2ptest.RandPeerIDFatal(t) + otherPeer := p2ptest.RandPeerIDFatal(t) + + e := newEngine(bs, 4, &fakePeerTagger{}, "localhost", 0, NewTestScoreLedger(shortTerm, nil)) + e.StartWorkers(context.Background(), process.WithTeardown(func() error { return nil })) + + blks := testutil.GenerateBlocksOfSize(4, 8*1024) + msg := message.New(false) + msg.AddEntry(blks[0].Cid(), 2, pb.Message_Wantlist_Have, false) + msg.AddEntry(blks[1].Cid(), 3, pb.Message_Wantlist_Have, false) + e.MessageReceived(context.Background(), partner, msg) + + msg2 := message.New(false) + msg2.AddEntry(blks[2].Cid(), 1, pb.Message_Wantlist_Block, false) + msg2.AddEntry(blks[3].Cid(), 4, pb.Message_Wantlist_Block, false) + e.MessageReceived(context.Background(), partner, msg2) + + entries := e.WantlistForPeer(otherPeer) + if len(entries) != 0 { + t.Fatal("expected wantlist to contain no wants for other peer") + } + + entries = e.WantlistForPeer(partner) + if len(entries) != 4 { + t.Fatal("expected wantlist to contain all wants from parter") + } + if entries[0].Priority != 4 || entries[1].Priority != 3 || entries[2].Priority != 2 || entries[3].Priority != 1 { + t.Fatal("expected wantlist to be sorted") + } + +} + +func TestTaggingPeers(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 
1*time.Second) + defer cancel() + sanfrancisco := newTestEngine(ctx, "sf") + seattle := newTestEngine(ctx, "sea") + + keys := []string{"a", "b", "c", "d", "e"} + for _, letter := range keys { + block := blocks.NewBlock([]byte(letter)) + if err := sanfrancisco.Blockstore.Put(block); err != nil { + t.Fatal(err) + } + } + partnerWantBlocks(sanfrancisco.Engine, keys, seattle.Peer) + next := <-sanfrancisco.Engine.Outbox() + envelope := <-next + + if sanfrancisco.PeerTagger.count(sanfrancisco.Engine.tagQueued) != 1 { + t.Fatal("Incorrect number of peers tagged") + } + envelope.Sent() + <-sanfrancisco.Engine.Outbox() + sanfrancisco.PeerTagger.wait(sanfrancisco.Engine.tagQueued) + if sanfrancisco.PeerTagger.count(sanfrancisco.Engine.tagQueued) != 0 { + t.Fatal("Peers should be untagged but weren't") + } +} + +func TestTaggingUseful(t *testing.T) { + peerSampleInterval := 1 * time.Millisecond + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + sampleCh := make(chan struct{}) + me := newTestEngineWithSampling(ctx, "engine", peerSampleInterval, sampleCh) + friend := peer.ID("friend") + + block := blocks.NewBlock([]byte("foobar")) + msg := message.New(false) + msg.AddBlock(block) + + for i := 0; i < 3; i++ { + if untagged := me.PeerTagger.count(me.Engine.tagUseful); untagged != 0 { + t.Fatalf("%d peers should be untagged but weren't", untagged) + } + + me.Engine.MessageSent(friend, msg) + + for j := 0; j < 2; j++ { + <-sampleCh + } + + if tagged := me.PeerTagger.count(me.Engine.tagUseful); tagged != 1 { + t.Fatalf("1 peer should be tagged, but %d were", tagged) + } + + for j := 0; j < longTermRatio; j++ { + <-sampleCh + } + } + + if me.PeerTagger.count(me.Engine.tagUseful) == 0 { + t.Fatal("peers should still be tagged due to long-term usefulness") + } + + for j := 0; j < longTermRatio; j++ { + <-sampleCh + } + + if me.PeerTagger.count(me.Engine.tagUseful) == 0 { + t.Fatal("peers should still be tagged due to long-term usefulness") + } + + for j := 0; j < longTermRatio; j++ { + <-sampleCh + } + + if me.PeerTagger.count(me.Engine.tagUseful) != 0 { + t.Fatal("peers should finally be untagged") + } +} + +func partnerWantBlocks(e *Engine, keys []string, partner peer.ID) { + add := message.New(false) + for i, letter := range keys { + block := blocks.NewBlock([]byte(letter)) + add.AddEntry(block.Cid(), int32(len(keys)-i), pb.Message_Wantlist_Block, true) + } + e.MessageReceived(context.Background(), partner, add) +} + +func partnerWantBlocksHaves(e *Engine, keys []string, wantHaves []string, sendDontHave bool, partner peer.ID) { + add := message.New(false) + priority := int32(len(wantHaves) + len(keys)) + for _, letter := range wantHaves { + block := blocks.NewBlock([]byte(letter)) + add.AddEntry(block.Cid(), priority, pb.Message_Wantlist_Have, sendDontHave) + priority-- + } + for _, letter := range keys { + block := blocks.NewBlock([]byte(letter)) + add.AddEntry(block.Cid(), priority, pb.Message_Wantlist_Block, sendDontHave) + priority-- + } + e.MessageReceived(context.Background(), partner, add) +} + +func partnerCancels(e *Engine, keys []string, partner peer.ID) { + cancels := message.New(false) + for _, k := range keys { + block := blocks.NewBlock([]byte(k)) + cancels.Cancel(block.Cid()) + } + e.MessageReceived(context.Background(), partner, cancels) +} + +type envChan <-chan *Envelope + +func getNextEnvelope(e *Engine, next envChan, t time.Duration) (envChan, *Envelope) { + ctx, cancel := context.WithTimeout(context.Background(), t) + defer cancel() + + if 
next == nil { + next = <-e.Outbox() // returns immediately + } + + select { + case env, ok := <-next: // blocks till next envelope ready + if !ok { + log.Warnf("got closed channel") + return nil, nil + } + return nil, env + case <-ctx.Done(): + // log.Warnf("got timeout") + } + return next, nil +} + +func checkHandledInOrder(t *testing.T, e *Engine, expected [][]string) error { + for _, keys := range expected { + next := <-e.Outbox() + envelope := <-next + received := envelope.Message.Blocks() + // Verify payload message length + if len(received) != len(keys) { + return errors.New(fmt.Sprintln("# blocks received", len(received), "# blocks expected", len(keys))) + } + // Verify payload message contents + for _, k := range keys { + found := false + expected := blocks.NewBlock([]byte(k)) + for _, block := range received { + if block.Cid().Equals(expected.Cid()) { + found = true + break + } + } + if !found { + return errors.New(fmt.Sprintln("received", received, "expected", string(expected.RawData()))) + } + } + } + return nil +} + +func stringsComplement(set, subset []string) []string { + m := make(map[string]struct{}) + for _, letter := range subset { + m[letter] = struct{}{} + } + var complement []string + for _, letter := range set { + if _, exists := m[letter]; !exists { + complement = append(complement, letter) + } + } + return complement +} diff --git a/internal/decision/ewma.go b/internal/decision/ewma.go new file mode 100644 index 0000000000000000000000000000000000000000..80d7d86b6dd910279e80ce8a3d233babc7ee6521 --- /dev/null +++ b/internal/decision/ewma.go @@ -0,0 +1,5 @@ +package decision + +func ewma(old, new, alpha float64) float64 { + return new*alpha + (1-alpha)*old +} diff --git a/internal/decision/ledger.go b/internal/decision/ledger.go new file mode 100644 index 0000000000000000000000000000000000000000..1b1c6d34d3347c18a48d820dfb62c533adcc3b6e --- /dev/null +++ b/internal/decision/ledger.go @@ -0,0 +1,42 @@ +package decision + +import ( + "sync" + + pb "gitlab.dms3.io/dms3/go-bitswap/message/pb" + wl "gitlab.dms3.io/dms3/go-bitswap/wantlist" + + cid "gitlab.dms3.io/dms3/go-cid" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +func newLedger(p peer.ID) *ledger { + return &ledger{ + wantList: wl.New(), + Partner: p, + } +} + +// Keeps the wantlist for the partner. NOT threadsafe! +type ledger struct { + // Partner is the remote Peer. + Partner peer.ID + + // wantList is a (bounded, small) set of keys that Partner desires. + wantList *wl.Wantlist + + lk sync.RWMutex +} + +func (l *ledger) Wants(k cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType) { + log.Debugf("peer %s wants %s", l.Partner, k) + l.wantList.Add(k, priority, wantType) +} + +func (l *ledger) CancelWant(k cid.Cid) bool { + return l.wantList.Remove(k) +} + +func (l *ledger) WantListContains(k cid.Cid) (wl.Entry, bool) { + return l.wantList.Contains(k) +} diff --git a/internal/decision/scoreledger.go b/internal/decision/scoreledger.go new file mode 100644 index 0000000000000000000000000000000000000000..6da9d15558a990cb51f5c4c8162e848cf338fd4e --- /dev/null +++ b/internal/decision/scoreledger.go @@ -0,0 +1,346 @@ +package decision + +import ( + "sync" + "time" + + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +const ( + // the alpha for the EWMA used to track short term usefulness + shortTermAlpha = 0.5 + + // the alpha for the EWMA used to track long term usefulness + longTermAlpha = 0.05 + + // how frequently the engine should sample usefulness. 
Peers that + // interact every shortTerm time period are considered "active". + shortTerm = 10 * time.Second + + // long term ratio defines what "long term" means in terms of the + // shortTerm duration. Peers that interact once every longTermRatio are + // considered useful over the long term. + longTermRatio = 10 + + // long/short term scores for tagging peers + longTermScore = 10 // this is a high tag but it grows _very_ slowly. + shortTermScore = 10 // this is a high tag but it'll go away quickly if we aren't using the peer. +) + +// Stores the data exchange relationship between two peers. +type scoreledger struct { + // Partner is the remote Peer. + partner peer.ID + + // tracks bytes sent... + bytesSent uint64 + + // ...and received. + bytesRecv uint64 + + // lastExchange is the time of the last data exchange. + lastExchange time.Time + + // These scores keep track of how useful we think this peer is. Short + // tracks short-term usefulness and long tracks long-term usefulness. + shortScore, longScore float64 + + // Score keeps track of the score used in the peer tagger. We track it + // here to avoid unnecessarily updating the tags in the connection manager. + score int + + // exchangeCount is the number of exchanges with this peer + exchangeCount uint64 + + // the record lock + lock sync.RWMutex +} + +// Receipt is a summary of the ledger for a given peer +// collecting various pieces of aggregated data for external +// reporting purposes. +type Receipt struct { + Peer string + Value float64 + Sent uint64 + Recv uint64 + Exchanged uint64 +} + +// Increments the sent counter. +func (l *scoreledger) AddToSentBytes(n int) { + l.lock.Lock() + defer l.lock.Unlock() + l.exchangeCount++ + l.lastExchange = time.Now() + l.bytesSent += uint64(n) +} + +// Increments the received counter. +func (l *scoreledger) AddToReceivedBytes(n int) { + l.lock.Lock() + defer l.lock.Unlock() + l.exchangeCount++ + l.lastExchange = time.Now() + l.bytesRecv += uint64(n) +} + +// Returns the Receipt for this ledger record. +func (l *scoreledger) Receipt() *Receipt { + l.lock.RLock() + defer l.lock.RUnlock() + + return &Receipt{ + Peer: l.partner.String(), + Value: float64(l.bytesSent) / float64(l.bytesRecv+1), + Sent: l.bytesSent, + Recv: l.bytesRecv, + Exchanged: l.exchangeCount, + } +} + +// DefaultScoreLedger is used by Engine as the default ScoreLedger. +type DefaultScoreLedger struct { + // the score func + scorePeer ScorePeerFunc + // is closed on Close + closing chan struct{} + // protects the fields immediatly below + lock sync.RWMutex + // ledgerMap lists score ledgers by their partner key. + ledgerMap map[peer.ID]*scoreledger + // how frequently the engine should sample peer usefulness + peerSampleInterval time.Duration + // used by the tests to detect when a sample is taken + sampleCh chan struct{} +} + +// scoreWorker keeps track of how "useful" our peers are, updating scores in the +// connection manager. +// +// It does this by tracking two scores: short-term usefulness and long-term +// usefulness. Short-term usefulness is sampled frequently and highly weights +// new observations. Long-term usefulness is sampled less frequently and highly +// weights on long-term trends. +// +// In practice, we do this by keeping two EWMAs. If we see an interaction +// within the sampling period, we record the score, otherwise, we record a 0. +// The short-term one has a high alpha and is sampled every shortTerm period. 
+// The long-term one has a low alpha and is sampled every +// longTermRatio*shortTerm period. +// +// To calculate the final score, we sum the short-term and long-term scores then +// adjust it ±25% based on our debt ratio. Peers that have historically been +// more useful to us than we are to them get the highest score. +func (dsl *DefaultScoreLedger) scoreWorker() { + ticker := time.NewTicker(dsl.peerSampleInterval) + defer ticker.Stop() + + type update struct { + peer peer.ID + score int + } + var ( + lastShortUpdate, lastLongUpdate time.Time + updates []update + ) + + for i := 0; ; i = (i + 1) % longTermRatio { + var now time.Time + select { + case now = <-ticker.C: + case <-dsl.closing: + return + } + + // The long term update ticks every `longTermRatio` short + // intervals. + updateLong := i == 0 + + dsl.lock.Lock() + for _, l := range dsl.ledgerMap { + l.lock.Lock() + + // Update the short-term score. + if l.lastExchange.After(lastShortUpdate) { + l.shortScore = ewma(l.shortScore, shortTermScore, shortTermAlpha) + } else { + l.shortScore = ewma(l.shortScore, 0, shortTermAlpha) + } + + // Update the long-term score. + if updateLong { + if l.lastExchange.After(lastLongUpdate) { + l.longScore = ewma(l.longScore, longTermScore, longTermAlpha) + } else { + l.longScore = ewma(l.longScore, 0, longTermAlpha) + } + } + + // Calculate the new score. + // + // The accounting score adjustment prefers peers _we_ + // need over peers that need us. This doesn't help with + // leeching. + var lscore float64 + if l.bytesRecv == 0 { + lscore = 0 + } else { + lscore = float64(l.bytesRecv) / float64(l.bytesRecv+l.bytesSent) + } + score := int((l.shortScore + l.longScore) * (lscore*.5 + .75)) + + // Avoid updating the connection manager unless there's a change. This can be expensive. + if l.score != score { + // put these in a list so we can perform the updates outside _global_ the lock. + updates = append(updates, update{l.partner, score}) + l.score = score + } + l.lock.Unlock() + } + dsl.lock.Unlock() + + // record the times. + lastShortUpdate = now + if updateLong { + lastLongUpdate = now + } + + // apply the updates + for _, update := range updates { + dsl.scorePeer(update.peer, update.score) + } + // Keep the memory. It's not much and it saves us from having to allocate. + updates = updates[:0] + + // Used by the tests + if dsl.sampleCh != nil { + dsl.sampleCh <- struct{}{} + } + } +} + +// Returns the score ledger for the given peer or nil if that peer +// is not on the ledger. +func (dsl *DefaultScoreLedger) find(p peer.ID) *scoreledger { + // Take a read lock (as it's less expensive) to check if we have + // a ledger for the peer. + dsl.lock.RLock() + l, ok := dsl.ledgerMap[p] + dsl.lock.RUnlock() + if ok { + return l + } + return nil +} + +// Returns a new scoreledger. +func newScoreLedger(p peer.ID) *scoreledger { + return &scoreledger{ + partner: p, + } +} + +// Lazily instantiates a ledger. +func (dsl *DefaultScoreLedger) findOrCreate(p peer.ID) *scoreledger { + l := dsl.find(p) + if l != nil { + return l + } + + // There's no ledger, so take a write lock, then check again and + // create the ledger if necessary. + dsl.lock.Lock() + defer dsl.lock.Unlock() + l, ok := dsl.ledgerMap[p] + if !ok { + l = newScoreLedger(p) + dsl.ledgerMap[p] = l + } + return l +} + +// GetReceipt returns aggregated data communication with a given peer. 
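+// If no ledger exists for the peer, a blank Receipt is returned rather than
+// nil. As a rough sketch, the Value field is the debt ratio kept by the
+// score ledger (see Receipt above):
+//
+//	Value = float64(bytesSent) / float64(bytesRecv+1)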
+func (dsl *DefaultScoreLedger) GetReceipt(p peer.ID) *Receipt { + l := dsl.find(p) + if l != nil { + return l.Receipt() + } + + // Return a blank receipt otherwise. + return &Receipt{ + Peer: p.String(), + Value: 0, + Sent: 0, + Recv: 0, + Exchanged: 0, + } +} + +// Starts the default ledger sampling process. +func (dsl *DefaultScoreLedger) Start(scorePeer ScorePeerFunc) { + dsl.init(scorePeer) + go dsl.scoreWorker() +} + +// Stops the sampling process. +func (dsl *DefaultScoreLedger) Stop() { + close(dsl.closing) +} + +// Initializes the score ledger. +func (dsl *DefaultScoreLedger) init(scorePeer ScorePeerFunc) { + dsl.lock.Lock() + defer dsl.lock.Unlock() + dsl.scorePeer = scorePeer +} + +// Increments the sent counter for the given peer. +func (dsl *DefaultScoreLedger) AddToSentBytes(p peer.ID, n int) { + l := dsl.findOrCreate(p) + l.AddToSentBytes(n) +} + +// Increments the received counter for the given peer. +func (dsl *DefaultScoreLedger) AddToReceivedBytes(p peer.ID, n int) { + l := dsl.findOrCreate(p) + l.AddToReceivedBytes(n) +} + +// PeerConnected should be called when a new peer connects, meaning +// we should open accounting. +func (dsl *DefaultScoreLedger) PeerConnected(p peer.ID) { + dsl.lock.Lock() + defer dsl.lock.Unlock() + _, ok := dsl.ledgerMap[p] + if !ok { + dsl.ledgerMap[p] = newScoreLedger(p) + } +} + +// PeerDisconnected should be called when a peer disconnects to +// clean up the accounting. +func (dsl *DefaultScoreLedger) PeerDisconnected(p peer.ID) { + dsl.lock.Lock() + defer dsl.lock.Unlock() + delete(dsl.ledgerMap, p) +} + +// Creates a new instance of the default score ledger. +func NewDefaultScoreLedger() *DefaultScoreLedger { + return &DefaultScoreLedger{ + ledgerMap: make(map[peer.ID]*scoreledger), + closing: make(chan struct{}), + peerSampleInterval: shortTerm, + } +} + +// Creates a new instance of the default score ledger with testing +// parameters. +func NewTestScoreLedger(peerSampleInterval time.Duration, sampleCh chan struct{}) *DefaultScoreLedger { + dsl := NewDefaultScoreLedger() + dsl.peerSampleInterval = peerSampleInterval + dsl.sampleCh = sampleCh + return dsl +} diff --git a/internal/decision/taskmerger.go b/internal/decision/taskmerger.go new file mode 100644 index 0000000000000000000000000000000000000000..629be547a7ad4c885caa0438ca5ca81f62c4d216 --- /dev/null +++ b/internal/decision/taskmerger.go @@ -0,0 +1,87 @@ +package decision + +import ( + "gitlab.dms3.io/dms3/go-peertaskqueue/peertask" +) + +// taskData is extra data associated with each task in the request queue +type taskData struct { + // Tasks can be want-have or want-block + IsWantBlock bool + // Whether to immediately send a response if the block is not found + SendDontHave bool + // The size of the block corresponding to the task + BlockSize int + // Whether the block was found + HaveBlock bool +} + +type taskMerger struct{} + +func newTaskMerger() *taskMerger { + return &taskMerger{} +} + +// The request queue uses this Method to decide if a newly pushed task has any +// new information beyond the tasks with the same Topic (CID) in the queue. 
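+// Broadly, the new task is considered informative in two cases: it is a
+// want-block while only want-haves are queued for the CID, or it carries
+// block size information (HaveBlock) while none of the queued tasks do.
+// For example, pushing a want-block with a known size on top of a queued
+// want-have DONT_HAVE counts as new information on both grounds.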
+func (*taskMerger) HasNewInfo(task peertask.Task, existing []peertask.Task) bool { + haveSize := false + isWantBlock := false + for _, et := range existing { + etd := et.Data.(*taskData) + if etd.HaveBlock { + haveSize = true + } + + if etd.IsWantBlock { + isWantBlock = true + } + } + + // If there is no active want-block and the new task is a want-block, + // the new task is better + newTaskData := task.Data.(*taskData) + if !isWantBlock && newTaskData.IsWantBlock { + return true + } + + // If there is no size information for the CID and the new task has + // size information, the new task is better + if !haveSize && newTaskData.HaveBlock { + return true + } + + return false +} + +// The request queue uses Merge to merge a newly pushed task with an existing +// task with the same Topic (CID) +func (*taskMerger) Merge(task peertask.Task, existing *peertask.Task) { + newTask := task.Data.(*taskData) + existingTask := existing.Data.(*taskData) + + // If we now have block size information, update the task with + // the new block size + if !existingTask.HaveBlock && newTask.HaveBlock { + existingTask.HaveBlock = newTask.HaveBlock + existingTask.BlockSize = newTask.BlockSize + } + + // If replacing a want-have with a want-block + if !existingTask.IsWantBlock && newTask.IsWantBlock { + // Change the type from want-have to want-block + existingTask.IsWantBlock = true + // If the want-have was a DONT_HAVE, or the want-block has a size + if !existingTask.HaveBlock || newTask.HaveBlock { + // Update the entry size + existingTask.HaveBlock = newTask.HaveBlock + existing.Work = task.Work + } + } + + // If the task is a want-block, make sure the entry size is equal + // to the block size (because we will send the whole block) + if existingTask.IsWantBlock && existingTask.HaveBlock { + existing.Work = existingTask.BlockSize + } +} diff --git a/internal/decision/taskmerger_test.go b/internal/decision/taskmerger_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a3501a1a9084aded5c48c3cdfd7e32e982516bea --- /dev/null +++ b/internal/decision/taskmerger_test.go @@ -0,0 +1,357 @@ +package decision + +import ( + "testing" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + "gitlab.dms3.io/dms3/go-peertaskqueue" + "gitlab.dms3.io/dms3/go-peertaskqueue/peertask" +) + +func TestPushHaveVsBlock(t *testing.T) { + partner := testutil.GeneratePeers(1)[0] + + wantHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + wantBlock := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 10, + Data: &taskData{ + IsWantBlock: true, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + + runTestCase := func(tasks []peertask.Task, expIsWantBlock bool) { + tasks = cloneTasks(tasks) + ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) + ptq.PushTasks(partner, tasks...) 
+ _, popped, _ := ptq.PopTasks(100) + if len(popped) != 1 { + t.Fatalf("Expected 1 task, received %d tasks", len(popped)) + } + isWantBlock := popped[0].Data.(*taskData).IsWantBlock + if isWantBlock != expIsWantBlock { + t.Fatalf("Expected task.IsWantBlock to be %t, received %t", expIsWantBlock, isWantBlock) + } + } + const wantBlockType = true + const wantHaveType = false + + // should ignore second want-have + runTestCase([]peertask.Task{wantHave, wantHave}, wantHaveType) + // should ignore second want-block + runTestCase([]peertask.Task{wantBlock, wantBlock}, wantBlockType) + // want-have does not overwrite want-block + runTestCase([]peertask.Task{wantBlock, wantHave}, wantBlockType) + // want-block overwrites want-have + runTestCase([]peertask.Task{wantHave, wantBlock}, wantBlockType) +} + +func TestPushSizeInfo(t *testing.T) { + partner := testutil.GeneratePeers(1)[0] + + wantBlockBlockSize := 10 + wantBlockDontHaveBlockSize := 0 + wantHaveBlockSize := 10 + wantHaveDontHaveBlockSize := 0 + wantBlock := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 10, + Data: &taskData{ + IsWantBlock: true, + BlockSize: wantBlockBlockSize, + HaveBlock: true, + SendDontHave: false, + }, + } + wantBlockDontHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 2, + Data: &taskData{ + IsWantBlock: true, + BlockSize: wantBlockDontHaveBlockSize, + HaveBlock: false, + SendDontHave: false, + }, + } + wantHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, Data: &taskData{ + IsWantBlock: false, + BlockSize: wantHaveBlockSize, + HaveBlock: true, + SendDontHave: false, + }, + } + wantHaveDontHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: wantHaveDontHaveBlockSize, + HaveBlock: false, + SendDontHave: false, + }, + } + + runTestCase := func(tasks []peertask.Task, expSize int, expBlockSize int, expIsWantBlock bool) { + tasks = cloneTasks(tasks) + ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) + ptq.PushTasks(partner, tasks...) 
+ _, popped, _ := ptq.PopTasks(100) + if len(popped) != 1 { + t.Fatalf("Expected 1 task, received %d tasks", len(popped)) + } + if popped[0].Work != expSize { + t.Fatalf("Expected task.Work to be %d, received %d", expSize, popped[0].Work) + } + td := popped[0].Data.(*taskData) + if td.BlockSize != expBlockSize { + t.Fatalf("Expected task.Work to be %d, received %d", expBlockSize, td.BlockSize) + } + if td.IsWantBlock != expIsWantBlock { + t.Fatalf("Expected task.IsWantBlock to be %t, received %t", expIsWantBlock, td.IsWantBlock) + } + } + + isWantBlock := true + isWantHave := false + + // want-block (DONT_HAVE) should have no effect on existing want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantBlockDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock) + // want-have (DONT_HAVE) should have no effect on existing want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantHaveDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock) + // want-block with size should update existing want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-have with size should update existing want-block (DONT_HAVE) size, + // but leave it as a want-block (ie should not change it to want-have) + runTestCase([]peertask.Task{wantBlockDontHave, wantHave}, wantHaveBlockSize, wantHaveBlockSize, isWantBlock) + + // want-block (DONT_HAVE) size should not update existing want-block with size + runTestCase([]peertask.Task{wantBlock, wantBlockDontHave}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-have (DONT_HAVE) should have no effect on existing want-block with size + runTestCase([]peertask.Task{wantBlock, wantHaveDontHave}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-block with size should have no effect on existing want-block with size + runTestCase([]peertask.Task{wantBlock, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-have with size should have no effect on existing want-block with size + runTestCase([]peertask.Task{wantBlock, wantHave}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + + // want-block (DONT_HAVE) should update type and entry size of existing want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantBlockDontHave}, wantBlockDontHave.Work, wantBlockDontHaveBlockSize, isWantBlock) + // want-have (DONT_HAVE) should have no effect on existing want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantHaveDontHave}, wantHaveDontHave.Work, wantHaveDontHaveBlockSize, isWantHave) + // want-block with size should update existing want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantBlock}, wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-have with size should update existing want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantHave}, wantHave.Work, wantHaveBlockSize, isWantHave) + + // want-block (DONT_HAVE) should update type and entry size of existing want-have with size + runTestCase([]peertask.Task{wantHave, wantBlockDontHave}, wantHaveBlockSize, wantHaveBlockSize, isWantBlock) + // want-have (DONT_HAVE) should not update existing want-have with size + runTestCase([]peertask.Task{wantHave, wantHaveDontHave}, wantHave.Work, wantHaveBlockSize, isWantHave) + // want-block with size should update type and entry size of existing want-have with size + runTestCase([]peertask.Task{wantHave, wantBlock}, 
wantBlock.Work, wantBlockBlockSize, isWantBlock) + // want-have should have no effect on existing want-have + runTestCase([]peertask.Task{wantHave, wantHave}, wantHave.Work, wantHaveBlockSize, isWantHave) +} + +func TestPushHaveVsBlockActive(t *testing.T) { + partner := testutil.GeneratePeers(1)[0] + + wantBlock := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 10, + Data: &taskData{ + IsWantBlock: true, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + wantHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + + runTestCase := func(tasks []peertask.Task, expCount int) { + tasks = cloneTasks(tasks) + ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) + // ptq.PushTasks(partner, tasks...) + var popped []*peertask.Task + for _, task := range tasks { + // Push the task + // tracker.PushTasks([]peertask.Task{task}) + ptq.PushTasks(partner, task) + // Pop the task (which makes it active) + _, poppedTasks, _ := ptq.PopTasks(10) + popped = append(popped, poppedTasks...) + } + if len(popped) != expCount { + t.Fatalf("Expected %d tasks, received %d tasks", expCount, len(popped)) + } + } + + // should ignore second want-have + runTestCase([]peertask.Task{wantHave, wantHave}, 1) + // should ignore second want-block + runTestCase([]peertask.Task{wantBlock, wantBlock}, 1) + // want-have does not overwrite want-block + runTestCase([]peertask.Task{wantBlock, wantHave}, 1) + // can't replace want-have with want-block because want-have is active + runTestCase([]peertask.Task{wantHave, wantBlock}, 2) +} + +func TestPushSizeInfoActive(t *testing.T) { + partner := testutil.GeneratePeers(1)[0] + + wantBlock := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 10, + Data: &taskData{ + IsWantBlock: true, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + wantBlockDontHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 2, + Data: &taskData{ + IsWantBlock: true, + BlockSize: 0, + HaveBlock: false, + SendDontHave: false, + }, + } + wantHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: 10, + HaveBlock: true, + SendDontHave: false, + }, + } + wantHaveDontHave := peertask.Task{ + Topic: "1", + Priority: 10, + Work: 1, + Data: &taskData{ + IsWantBlock: false, + BlockSize: 0, + HaveBlock: false, + SendDontHave: false, + }, + } + + runTestCase := func(tasks []peertask.Task, expTasks []peertask.Task) { + tasks = cloneTasks(tasks) + ptq := peertaskqueue.New(peertaskqueue.TaskMerger(newTaskMerger())) + var popped []*peertask.Task + for _, task := range tasks { + // Push the task + ptq.PushTasks(partner, task) + // Pop the task (which makes it active) + _, poppedTasks, _ := ptq.PopTasks(10) + popped = append(popped, poppedTasks...) 
+ } + if len(popped) != len(expTasks) { + t.Fatalf("Expected %d tasks, received %d tasks", len(expTasks), len(popped)) + } + for i, task := range popped { + td := task.Data.(*taskData) + expTd := expTasks[i].Data.(*taskData) + if td.IsWantBlock != expTd.IsWantBlock { + t.Fatalf("Expected IsWantBlock to be %t, received %t", expTd.IsWantBlock, td.IsWantBlock) + } + if task.Work != expTasks[i].Work { + t.Fatalf("Expected Size to be %d, received %d", expTasks[i].Work, task.Work) + } + } + } + + // second want-block (DONT_HAVE) should be ignored + runTestCase([]peertask.Task{wantBlockDontHave, wantBlockDontHave}, []peertask.Task{wantBlockDontHave}) + // want-have (DONT_HAVE) should be ignored if there is existing active want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantHaveDontHave}, []peertask.Task{wantBlockDontHave}) + // want-block with size should be added if there is existing active want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantBlock}, []peertask.Task{wantBlockDontHave, wantBlock}) + // want-have with size should be added if there is existing active want-block (DONT_HAVE) + runTestCase([]peertask.Task{wantBlockDontHave, wantHave}, []peertask.Task{wantBlockDontHave, wantHave}) + + // want-block (DONT_HAVE) should be added if there is existing active want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantBlockDontHave}, []peertask.Task{wantHaveDontHave, wantBlockDontHave}) + // want-have (DONT_HAVE) should be ignored if there is existing active want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantHaveDontHave}, []peertask.Task{wantHaveDontHave}) + // want-block with size should be added if there is existing active want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantBlock}, []peertask.Task{wantHaveDontHave, wantBlock}) + // want-have with size should be added if there is existing active want-have (DONT_HAVE) + runTestCase([]peertask.Task{wantHaveDontHave, wantHave}, []peertask.Task{wantHaveDontHave, wantHave}) + + // want-block (DONT_HAVE) should be ignored if there is existing active want-block with size + runTestCase([]peertask.Task{wantBlock, wantBlockDontHave}, []peertask.Task{wantBlock}) + // want-have (DONT_HAVE) should be ignored if there is existing active want-block with size + runTestCase([]peertask.Task{wantBlock, wantHaveDontHave}, []peertask.Task{wantBlock}) + // second want-block with size should be ignored + runTestCase([]peertask.Task{wantBlock, wantBlock}, []peertask.Task{wantBlock}) + // want-have with size should be ignored if there is existing active want-block with size + runTestCase([]peertask.Task{wantBlock, wantHave}, []peertask.Task{wantBlock}) + + // want-block (DONT_HAVE) should be added if there is existing active want-have with size + runTestCase([]peertask.Task{wantHave, wantBlockDontHave}, []peertask.Task{wantHave, wantBlockDontHave}) + // want-have (DONT_HAVE) should be ignored if there is existing active want-have with size + runTestCase([]peertask.Task{wantHave, wantHaveDontHave}, []peertask.Task{wantHave}) + // second want-have with size should be ignored + runTestCase([]peertask.Task{wantHave, wantHave}, []peertask.Task{wantHave}) + // want-block with size should be added if there is existing active want-have with size + runTestCase([]peertask.Task{wantHave, wantBlock}, []peertask.Task{wantHave, wantBlock}) +} + +func cloneTasks(tasks []peertask.Task) []peertask.Task { + var cp []peertask.Task + for _, t := range tasks { + td := 
t.Data.(*taskData) + cp = append(cp, peertask.Task{ + Topic: t.Topic, + Priority: t.Priority, + Work: t.Work, + Data: &taskData{ + IsWantBlock: td.IsWantBlock, + BlockSize: td.BlockSize, + HaveBlock: td.HaveBlock, + SendDontHave: td.SendDontHave, + }, + }) + } + return cp +} diff --git a/internal/getter/getter.go b/internal/getter/getter.go new file mode 100644 index 0000000000000000000000000000000000000000..2160ba3c942eff5d77c088dd954e4c2f4f301175 --- /dev/null +++ b/internal/getter/getter.go @@ -0,0 +1,132 @@ +package getter + +import ( + "context" + "errors" + + notifications "gitlab.dms3.io/dms3/go-bitswap/internal/notifications" + logging "gitlab.dms3.io/dms3/go-log" + + blocks "gitlab.dms3.io/dms3/go-block-format" + cid "gitlab.dms3.io/dms3/go-cid" + blockstore "gitlab.dms3.io/dms3/go-dms3-blockstore" +) + +var log = logging.Logger("bitswap") + +// GetBlocksFunc is any function that can take an array of CIDs and return a +// channel of incoming blocks. +type GetBlocksFunc func(context.Context, []cid.Cid) (<-chan blocks.Block, error) + +// SyncGetBlock takes a block cid and an async function for getting several +// blocks that returns a channel, and uses that function to return the +// block syncronously. +func SyncGetBlock(p context.Context, k cid.Cid, gb GetBlocksFunc) (blocks.Block, error) { + if !k.Defined() { + log.Error("undefined cid in GetBlock") + return nil, blockstore.ErrNotFound + } + + // Any async work initiated by this function must end when this function + // returns. To ensure this, derive a new context. Note that it is okay to + // listen on parent in this scope, but NOT okay to pass |parent| to + // functions called by this one. Otherwise those functions won't return + // when this context's cancel func is executed. This is difficult to + // enforce. May this comment keep you safe. + ctx, cancel := context.WithCancel(p) + defer cancel() + + promise, err := gb(ctx, []cid.Cid{k}) + if err != nil { + return nil, err + } + + select { + case block, ok := <-promise: + if !ok { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + return nil, errors.New("promise channel was closed") + } + } + return block, nil + case <-p.Done(): + return nil, p.Err() + } +} + +// WantFunc is any function that can express a want for set of blocks. +type WantFunc func(context.Context, []cid.Cid) + +// AsyncGetBlocks take a set of block cids, a pubsub channel for incoming +// blocks, a want function, and a close function, and returns a channel of +// incoming blocks. +func AsyncGetBlocks(ctx context.Context, sessctx context.Context, keys []cid.Cid, notif notifications.PubSub, + want WantFunc, cwants func([]cid.Cid)) (<-chan blocks.Block, error) { + + // If there are no keys supplied, just return a closed channel + if len(keys) == 0 { + out := make(chan blocks.Block) + close(out) + return out, nil + } + + // Use a PubSub notifier to listen for incoming blocks for each key + remaining := cid.NewSet() + promise := notif.Subscribe(ctx, keys...) + for _, k := range keys { + log.Debugw("Bitswap.GetBlockRequest.Start", "cid", k) + remaining.Add(k) + } + + // Send the want request for the keys to the network + want(ctx, keys) + + out := make(chan blocks.Block) + go handleIncoming(ctx, sessctx, remaining, promise, out, cwants) + return out, nil +} + +// Listens for incoming blocks, passing them to the out channel. +// If the context is cancelled or the incoming channel closes, calls cfun with +// any keys corresponding to blocks that were never received. 
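+// The same applies when the session context is cancelled: whichever of the
+// two contexts is done first stops the loop, and any keys still tracked in
+// remaining are handed to cfun so the caller can cancel those wants.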
+func handleIncoming(ctx context.Context, sessctx context.Context, remaining *cid.Set, + in <-chan blocks.Block, out chan blocks.Block, cfun func([]cid.Cid)) { + + ctx, cancel := context.WithCancel(ctx) + + // Clean up before exiting this function, and call the cancel function on + // any remaining keys + defer func() { + cancel() + close(out) + // can't just defer this call on its own, arguments are resolved *when* the defer is created + cfun(remaining.Keys()) + }() + + for { + select { + case blk, ok := <-in: + // If the channel is closed, we're done (note that PubSub closes + // the channel once all the keys have been received) + if !ok { + return + } + + remaining.Remove(blk.Cid()) + select { + case out <- blk: + case <-ctx.Done(): + return + case <-sessctx.Done(): + return + } + case <-ctx.Done(): + return + case <-sessctx.Done(): + return + } + } +} diff --git a/internal/messagequeue/donthavetimeoutmgr.go b/internal/messagequeue/donthavetimeoutmgr.go new file mode 100644 index 0000000000000000000000000000000000000000..fac76976033ab964f4abb1edaead0000728139a9 --- /dev/null +++ b/internal/messagequeue/donthavetimeoutmgr.go @@ -0,0 +1,375 @@ +package messagequeue + +import ( + "context" + "sync" + "time" + + cid "gitlab.dms3.io/dms3/go-cid" + "gitlab.dms3.io/p2p/go-p2p/p2p/protocol/ping" +) + +const ( + // dontHaveTimeout is used to simulate a DONT_HAVE when communicating with + // a peer whose Bitswap client doesn't support the DONT_HAVE response, + // or when the peer takes too long to respond. + // If the peer doesn't respond to a want-block within the timeout, the + // local node assumes that the peer doesn't have the block. + dontHaveTimeout = 5 * time.Second + + // maxExpectedWantProcessTime is the maximum amount of time we expect a + // peer takes to process a want and initiate sending a response to us + maxExpectedWantProcessTime = 2 * time.Second + + // maxTimeout is the maximum allowed timeout, regardless of latency + maxTimeout = dontHaveTimeout + maxExpectedWantProcessTime + + // pingLatencyMultiplier is multiplied by the average ping time to + // get an upper bound on how long we expect to wait for a peer's response + // to arrive + pingLatencyMultiplier = 3 + + // messageLatencyAlpha is the alpha supplied to the message latency EWMA + messageLatencyAlpha = 0.5 + + // To give a margin for error, the timeout is calculated as + // messageLatencyMultiplier * message latency + messageLatencyMultiplier = 2 +) + +// PeerConnection is a connection to a peer that can be pinged, and the +// average latency measured +type PeerConnection interface { + // Ping the peer + Ping(context.Context) ping.Result + // The average latency of all pings + Latency() time.Duration +} + +// pendingWant keeps track of a want that has been sent and we're waiting +// for a response or for a timeout to expire +type pendingWant struct { + c cid.Cid + active bool + sent time.Time +} + +// dontHaveTimeoutMgr simulates a DONT_HAVE message if the peer takes too long +// to respond to a message. +// The timeout is based on latency - we start with a default latency, while +// we ping the peer to estimate latency. If we receive a response from the +// peer we use the response latency. 
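+// +// Illustrative usage sketch (the callback body and variable names are +// assumptions; the methods shown are the ones defined in this file): +// +//	dhtm := newDontHaveTimeoutMgr(conn, func(ks []cid.Cid) { +//		// treat ks as if the peer had responded DONT_HAVE +//	}) +//	dhtm.Start()            // idempotent; begins latency estimation +//	dhtm.AddPending(ks)     // start the timeout clock for these wants +//	dhtm.CancelPending(ks)  // a response arrived; stop tracking these wants +//	dhtm.Shutdown()         // final; no timeouts fire after shutdown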
+type dontHaveTimeoutMgr struct { + ctx context.Context + shutdown func() + peerConn PeerConnection + onDontHaveTimeout func([]cid.Cid) + defaultTimeout time.Duration + maxTimeout time.Duration + pingLatencyMultiplier int + messageLatencyMultiplier int + maxExpectedWantProcessTime time.Duration + + // All variables below here must be protected by the lock + lk sync.RWMutex + // has the timeout manager started + started bool + // wants that are active (waiting for a response or timeout) + activeWants map[cid.Cid]*pendingWant + // queue of wants, from oldest to newest + wantQueue []*pendingWant + // time to wait for a response (depends on latency) + timeout time.Duration + // ewma of message latency (time from message sent to response received) + messageLatency *latencyEwma + // timer used to wait until want at front of queue expires + checkForTimeoutsTimer *time.Timer +} + +// newDontHaveTimeoutMgr creates a new dontHaveTimeoutMgr +// onDontHaveTimeout is called when pending keys expire (not cancelled before timeout) +func newDontHaveTimeoutMgr(pc PeerConnection, onDontHaveTimeout func([]cid.Cid)) *dontHaveTimeoutMgr { + return newDontHaveTimeoutMgrWithParams(pc, onDontHaveTimeout, dontHaveTimeout, maxTimeout, + pingLatencyMultiplier, messageLatencyMultiplier, maxExpectedWantProcessTime) +} + +// newDontHaveTimeoutMgrWithParams is used by the tests +func newDontHaveTimeoutMgrWithParams( + pc PeerConnection, + onDontHaveTimeout func([]cid.Cid), + defaultTimeout time.Duration, + maxTimeout time.Duration, + pingLatencyMultiplier int, + messageLatencyMultiplier int, + maxExpectedWantProcessTime time.Duration) *dontHaveTimeoutMgr { + + ctx, shutdown := context.WithCancel(context.Background()) + mqp := &dontHaveTimeoutMgr{ + ctx: ctx, + shutdown: shutdown, + peerConn: pc, + activeWants: make(map[cid.Cid]*pendingWant), + timeout: defaultTimeout, + messageLatency: &latencyEwma{alpha: messageLatencyAlpha}, + defaultTimeout: defaultTimeout, + maxTimeout: maxTimeout, + pingLatencyMultiplier: pingLatencyMultiplier, + messageLatencyMultiplier: messageLatencyMultiplier, + maxExpectedWantProcessTime: maxExpectedWantProcessTime, + onDontHaveTimeout: onDontHaveTimeout, + } + + return mqp +} + +// Shutdown the dontHaveTimeoutMgr. Any subsequent call to Start() will be ignored +func (dhtm *dontHaveTimeoutMgr) Shutdown() { + dhtm.shutdown() + + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + // Clear any pending check for timeouts + if dhtm.checkForTimeoutsTimer != nil { + dhtm.checkForTimeoutsTimer.Stop() + } +} + +// Start the dontHaveTimeoutMgr. This method is idempotent +func (dhtm *dontHaveTimeoutMgr) Start() { + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + // Make sure the dont have timeout manager hasn't already been started + if dhtm.started { + return + } + dhtm.started = true + + // If we already have a measure of latency to the peer, use it to + // calculate a reasonable timeout + latency := dhtm.peerConn.Latency() + if latency.Nanoseconds() > 0 { + dhtm.timeout = dhtm.calculateTimeoutFromPingLatency(latency) + return + } + + // Otherwise measure latency by pinging the peer + go dhtm.measurePingLatency() +} + +// UpdateMessageLatency is called when we receive a response from the peer. +// It is the time between sending a request and receiving the corresponding +// response. 
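+// +// Worked example (illustrative numbers): with messageLatencyAlpha = 0.5, the +// first sample seeds the EWMA directly, so samples of 20ms then 10ms give +// 10ms*0.5 + 20ms*0.5 = 15ms; with the default messageLatencyMultiplier of 2 +// the timeout would become 30ms, capped at maxTimeout.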
+func (dhtm *dontHaveTimeoutMgr) UpdateMessageLatency(elapsed time.Duration) { + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + // Update the message latency and the timeout + dhtm.messageLatency.update(elapsed) + oldTimeout := dhtm.timeout + dhtm.timeout = dhtm.calculateTimeoutFromMessageLatency() + + // If the timeout has decreased + if dhtm.timeout < oldTimeout { + // Check if after changing the timeout there are any pending wants that + // are now over the timeout + dhtm.checkForTimeouts() + } +} + +// measurePingLatency measures the latency to the peer by pinging it +func (dhtm *dontHaveTimeoutMgr) measurePingLatency() { + // Wait up to defaultTimeout for a response to the ping + ctx, cancel := context.WithTimeout(dhtm.ctx, dhtm.defaultTimeout) + defer cancel() + + // Ping the peer + res := dhtm.peerConn.Ping(ctx) + if res.Error != nil { + // If there was an error, we'll just leave the timeout as + // defaultTimeout + return + } + + // Get the average latency to the peer + latency := dhtm.peerConn.Latency() + + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + // A message has arrived so we already set the timeout based on message latency + if dhtm.messageLatency.samples > 0 { + return + } + + // Calculate a reasonable timeout based on latency + dhtm.timeout = dhtm.calculateTimeoutFromPingLatency(latency) + + // Check if after changing the timeout there are any pending wants that are + // now over the timeout + dhtm.checkForTimeouts() +} + +// checkForTimeouts checks pending wants to see if any are over the timeout. +// Note: this function should only be called within the lock. +func (dhtm *dontHaveTimeoutMgr) checkForTimeouts() { + if len(dhtm.wantQueue) == 0 { + return + } + + // Figure out which of the blocks that were wanted were not received + // within the timeout + expired := make([]cid.Cid, 0, len(dhtm.activeWants)) + for len(dhtm.wantQueue) > 0 { + pw := dhtm.wantQueue[0] + + // If the want is still active + if pw.active { + // The queue is in order from earliest to latest, so if we + // didn't find an expired entry we can stop iterating + if time.Since(pw.sent) < dhtm.timeout { + break + } + + // Add the want to the expired list + expired = append(expired, pw.c) + // Remove the want from the activeWants map + delete(dhtm.activeWants, pw.c) + } + + // Remove expired or cancelled wants from the want queue + dhtm.wantQueue = dhtm.wantQueue[1:] + } + + // Fire the timeout event for the expired wants + if len(expired) > 0 { + go dhtm.fireTimeout(expired) + } + + if len(dhtm.wantQueue) == 0 { + return + } + + // Make sure the timeout manager is still running + if dhtm.ctx.Err() != nil { + return + } + + // Schedule the next check for the moment when the oldest pending want will + // timeout + oldestStart := dhtm.wantQueue[0].sent + until := time.Until(oldestStart.Add(dhtm.timeout)) + if dhtm.checkForTimeoutsTimer == nil { + dhtm.checkForTimeoutsTimer = time.AfterFunc(until, func() { + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + dhtm.checkForTimeouts() + }) + } else { + dhtm.checkForTimeoutsTimer.Stop() + dhtm.checkForTimeoutsTimer.Reset(until) + } +} + +// AddPending adds the given keys that will expire if not cancelled before +// the timeout +func (dhtm *dontHaveTimeoutMgr) AddPending(ks []cid.Cid) { + if len(ks) == 0 { + return + } + + start := time.Now() + + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + queueWasEmpty := len(dhtm.activeWants) == 0 + + // Record the start time for each key + for _, c := range ks { + if _, ok := dhtm.activeWants[c]; !ok { + pw := pendingWant{ + c: c, + sent: 
start, + active: true, + } + dhtm.activeWants[c] = &pw + dhtm.wantQueue = append(dhtm.wantQueue, &pw) + } + } + + // If there was already an earlier pending item in the queue, then there + // must already be a timeout check scheduled. If there is nothing in the + // queue then we should make sure to schedule a check. + if queueWasEmpty { + dhtm.checkForTimeouts() + } +} + +// CancelPending is called when we receive a response for a key +func (dhtm *dontHaveTimeoutMgr) CancelPending(ks []cid.Cid) { + dhtm.lk.Lock() + defer dhtm.lk.Unlock() + + // Mark the wants as cancelled + for _, c := range ks { + if pw, ok := dhtm.activeWants[c]; ok { + pw.active = false + delete(dhtm.activeWants, c) + } + } +} + +// fireTimeout fires the onDontHaveTimeout method with the timed out keys +func (dhtm *dontHaveTimeoutMgr) fireTimeout(pending []cid.Cid) { + // Make sure the timeout manager has not been shut down + if dhtm.ctx.Err() != nil { + return + } + + // Fire the timeout + dhtm.onDontHaveTimeout(pending) +} + +// calculateTimeoutFromPingLatency calculates a reasonable timeout derived from latency +func (dhtm *dontHaveTimeoutMgr) calculateTimeoutFromPingLatency(latency time.Duration) time.Duration { + // The maximum expected time for a response is + // the expected time to process the want + (latency * multiplier) + // The multiplier is to provide some padding for variable latency. + timeout := dhtm.maxExpectedWantProcessTime + time.Duration(dhtm.pingLatencyMultiplier)*latency + if timeout > dhtm.maxTimeout { + timeout = dhtm.maxTimeout + } + return timeout +} + +// calculateTimeoutFromMessageLatency calculates a timeout derived from message latency +func (dhtm *dontHaveTimeoutMgr) calculateTimeoutFromMessageLatency() time.Duration { + timeout := dhtm.messageLatency.latency * time.Duration(dhtm.messageLatencyMultiplier) + if timeout > dhtm.maxTimeout { + timeout = dhtm.maxTimeout + } + return timeout +} + +// latencyEwma is an EWMA of message latency +type latencyEwma struct { + alpha float64 + samples uint64 + latency time.Duration +} + +// update the EWMA with the given sample +func (le *latencyEwma) update(elapsed time.Duration) { + le.samples++ + + // Initially set alpha to be 1.0 / + alpha := 1.0 / float64(le.samples) + if alpha < le.alpha { + // Once we have enough samples, clamp alpha + alpha = le.alpha + } + le.latency = time.Duration(float64(elapsed)*alpha + (1-alpha)*float64(le.latency)) +} diff --git a/internal/messagequeue/donthavetimeoutmgr_test.go b/internal/messagequeue/donthavetimeoutmgr_test.go new file mode 100644 index 0000000000000000000000000000000000000000..bf9e418d518c55c7c07524f1a40ec76a38f6f62c --- /dev/null +++ b/internal/messagequeue/donthavetimeoutmgr_test.go @@ -0,0 +1,395 @@ +package messagequeue + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + cid "gitlab.dms3.io/dms3/go-cid" + "gitlab.dms3.io/p2p/go-p2p/p2p/protocol/ping" +) + +type mockPeerConn struct { + err error + latency time.Duration + latencies []time.Duration +} + +func (pc *mockPeerConn) Ping(ctx context.Context) ping.Result { + timer := time.NewTimer(pc.latency) + select { + case <-timer.C: + if pc.err != nil { + return ping.Result{Error: pc.err} + } + pc.latencies = append(pc.latencies, pc.latency) + case <-ctx.Done(): + } + return ping.Result{RTT: pc.latency} +} + +func (pc *mockPeerConn) Latency() time.Duration { + sum := time.Duration(0) + if len(pc.latencies) == 0 { + return sum + } + for _, l := range pc.latencies { + sum += l + } + 
return sum / time.Duration(len(pc.latencies)) +} + +type timeoutRecorder struct { +	timedOutKs []cid.Cid +	lk         sync.Mutex +} + +func (tr *timeoutRecorder) onTimeout(tks []cid.Cid) { +	tr.lk.Lock() +	defer tr.lk.Unlock() + +	tr.timedOutKs = append(tr.timedOutKs, tks...) +} + +func (tr *timeoutRecorder) timedOutCount() int { +	tr.lk.Lock() +	defer tr.lk.Unlock() + +	return len(tr.timedOutKs) +} + +func (tr *timeoutRecorder) clear() { +	tr.lk.Lock() +	defer tr.lk.Unlock() + +	tr.timedOutKs = nil +} + +func TestDontHaveTimeoutMgrTimeout(t *testing.T) { +	firstks := testutil.GenerateCids(2) +	secondks := append(firstks, testutil.GenerateCids(3)...) +	latency := time.Millisecond * 20 +	latMultiplier := 2 +	expProcessTime := 5 * time.Millisecond +	expectedTimeout := expProcessTime + latency*time.Duration(latMultiplier) +	pc := &mockPeerConn{latency: latency} +	tr := timeoutRecorder{} + +	dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, +		dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) +	dhtm.Start() +	defer dhtm.Shutdown() + +	// Add first set of keys +	dhtm.AddPending(firstks) + +	// Wait for less than the expected timeout +	time.Sleep(expectedTimeout - 10*time.Millisecond) + +	// At this stage no keys should have timed out +	if tr.timedOutCount() > 0 { +		t.Fatal("expected timeout not to have happened yet") +	} + +	// Add second set of keys +	dhtm.AddPending(secondks) + +	// Wait until after the expected timeout +	time.Sleep(20 * time.Millisecond) + +	// At this stage the first set of keys should have timed out +	if tr.timedOutCount() != len(firstks) { +		t.Fatal("expected timeout", tr.timedOutCount(), len(firstks)) +	} + +	// Clear the recorded timed out keys +	tr.clear() + +	// Sleep until the second set of keys should have timed out +	time.Sleep(expectedTimeout + 10*time.Millisecond) + +	// At this stage all keys should have timed out. The second set included +	// the first set of keys, but they were added before the first set timed +	// out, so only the remaining keys should have been added.
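+	// In this test firstks has 2 keys and secondks repeats those 2 and adds 3 +	// new ones, so the expected count below is len(secondks)-len(firstks) = 5-2 = 3.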
+ if tr.timedOutCount() != len(secondks)-len(firstks) { + t.Fatal("expected second set of keys to timeout") + } +} + +func TestDontHaveTimeoutMgrCancel(t *testing.T) { + ks := testutil.GenerateCids(3) + latency := time.Millisecond * 10 + latMultiplier := 1 + expProcessTime := time.Duration(0) + expectedTimeout := latency + pc := &mockPeerConn{latency: latency} + tr := timeoutRecorder{} + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) + dhtm.Start() + defer dhtm.Shutdown() + + // Add keys + dhtm.AddPending(ks) + time.Sleep(5 * time.Millisecond) + + // Cancel keys + cancelCount := 1 + dhtm.CancelPending(ks[:cancelCount]) + + // Wait for the expected timeout + time.Sleep(expectedTimeout) + + // At this stage all non-cancelled keys should have timed out + if tr.timedOutCount() != len(ks)-cancelCount { + t.Fatal("expected timeout") + } +} + +func TestDontHaveTimeoutWantCancelWant(t *testing.T) { + ks := testutil.GenerateCids(3) + latency := time.Millisecond * 20 + latMultiplier := 1 + expProcessTime := time.Duration(0) + expectedTimeout := latency + pc := &mockPeerConn{latency: latency} + tr := timeoutRecorder{} + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) + dhtm.Start() + defer dhtm.Shutdown() + + // Add keys + dhtm.AddPending(ks) + + // Wait for a short time + time.Sleep(expectedTimeout - 10*time.Millisecond) + + // Cancel two keys + dhtm.CancelPending(ks[:2]) + + time.Sleep(5 * time.Millisecond) + + // Add back one cancelled key + dhtm.AddPending(ks[:1]) + + // Wait till after initial timeout + time.Sleep(10 * time.Millisecond) + + // At this stage only the key that was never cancelled should have timed out + if tr.timedOutCount() != 1 { + t.Fatal("expected one key to timeout") + } + + // Wait till after added back key should time out + time.Sleep(latency) + + // At this stage the key that was added back should also have timed out + if tr.timedOutCount() != 2 { + t.Fatal("expected added back key to timeout") + } +} + +func TestDontHaveTimeoutRepeatedAddPending(t *testing.T) { + ks := testutil.GenerateCids(10) + latency := time.Millisecond * 5 + latMultiplier := 1 + expProcessTime := time.Duration(0) + pc := &mockPeerConn{latency: latency} + tr := timeoutRecorder{} + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) + dhtm.Start() + defer dhtm.Shutdown() + + // Add keys repeatedly + for _, c := range ks { + dhtm.AddPending([]cid.Cid{c}) + } + + // Wait for the expected timeout + time.Sleep(latency + 5*time.Millisecond) + + // At this stage all keys should have timed out + if tr.timedOutCount() != len(ks) { + t.Fatal("expected timeout") + } +} + +func TestDontHaveTimeoutMgrMessageLatency(t *testing.T) { + ks := testutil.GenerateCids(2) + latency := time.Millisecond * 40 + latMultiplier := 1 + expProcessTime := time.Duration(0) + msgLatencyMultiplier := 1 + pc := &mockPeerConn{latency: latency} + tr := timeoutRecorder{} + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + dontHaveTimeout, maxTimeout, latMultiplier, msgLatencyMultiplier, expProcessTime) + dhtm.Start() + defer dhtm.Shutdown() + + // Add keys + dhtm.AddPending(ks) + + // expectedTimeout + // = expProcessTime + latency*time.Duration(latMultiplier) + // = 0 + 40ms * 1 + // = 40ms + + // Wait for less than the expected 
timeout + time.Sleep(25 * time.Millisecond) + + // Receive two message latency updates + dhtm.UpdateMessageLatency(time.Millisecond * 20) + dhtm.UpdateMessageLatency(time.Millisecond * 10) + + // alpha is 0.5 so timeout should be + // = (20ms * alpha) + (10ms * (1 - alpha)) + // = (20ms * 0.5) + (10ms * 0.5) + // = 15ms + // We've already slept for 25ms so with the new 15ms timeout + // the keys should have timed out + + // Give the queue some time to process the updates + time.Sleep(5 * time.Millisecond) + + if tr.timedOutCount() != len(ks) { + t.Fatal("expected keys to timeout") + } +} + +func TestDontHaveTimeoutMgrMessageLatencyMax(t *testing.T) { + ks := testutil.GenerateCids(2) + pc := &mockPeerConn{latency: time.Second} // ignored + tr := timeoutRecorder{} + msgLatencyMultiplier := 1 + testMaxTimeout := time.Millisecond * 10 + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + dontHaveTimeout, testMaxTimeout, pingLatencyMultiplier, msgLatencyMultiplier, maxExpectedWantProcessTime) + dhtm.Start() + defer dhtm.Shutdown() + + // Add keys + dhtm.AddPending(ks) + + // Receive a message latency update that would make the timeout greater + // than the maximum timeout + dhtm.UpdateMessageLatency(testMaxTimeout * 4) + + // Sleep until just after the maximum timeout + time.Sleep(testMaxTimeout + 5*time.Millisecond) + + // Keys should have timed out + if tr.timedOutCount() != len(ks) { + t.Fatal("expected keys to timeout") + } +} + +func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfPingError(t *testing.T) { + ks := testutil.GenerateCids(2) + latency := time.Millisecond * 1 + latMultiplier := 2 + expProcessTime := 2 * time.Millisecond + defaultTimeout := 10 * time.Millisecond + expectedTimeout := expProcessTime + defaultTimeout + tr := timeoutRecorder{} + pc := &mockPeerConn{latency: latency, err: fmt.Errorf("ping error")} + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + defaultTimeout, dontHaveTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) + dhtm.Start() + defer dhtm.Shutdown() + + // Add keys + dhtm.AddPending(ks) + + // Sleep for less than the expected timeout + time.Sleep(expectedTimeout - 5*time.Millisecond) + + // At this stage no timeout should have happened yet + if tr.timedOutCount() > 0 { + t.Fatal("expected timeout not to have happened yet") + } + + // Sleep until after the expected timeout + time.Sleep(10 * time.Millisecond) + + // Now the keys should have timed out + if tr.timedOutCount() != len(ks) { + t.Fatal("expected timeout") + } +} + +func TestDontHaveTimeoutMgrUsesDefaultTimeoutIfLatencyLonger(t *testing.T) { + ks := testutil.GenerateCids(2) + latency := time.Millisecond * 20 + latMultiplier := 1 + expProcessTime := time.Duration(0) + defaultTimeout := 10 * time.Millisecond + tr := timeoutRecorder{} + pc := &mockPeerConn{latency: latency} + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + defaultTimeout, dontHaveTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) + dhtm.Start() + defer dhtm.Shutdown() + + // Add keys + dhtm.AddPending(ks) + + // Sleep for less than the default timeout + time.Sleep(defaultTimeout - 5*time.Millisecond) + + // At this stage no timeout should have happened yet + if tr.timedOutCount() > 0 { + t.Fatal("expected timeout not to have happened yet") + } + + // Sleep until after the default timeout + time.Sleep(defaultTimeout * 2) + + // Now the keys should have timed out + if tr.timedOutCount() != len(ks) { + t.Fatal("expected timeout") + } +} + +func 
TestDontHaveTimeoutNoTimeoutAfterShutdown(t *testing.T) { + ks := testutil.GenerateCids(2) + latency := time.Millisecond * 10 + latMultiplier := 1 + expProcessTime := time.Duration(0) + tr := timeoutRecorder{} + pc := &mockPeerConn{latency: latency} + + dhtm := newDontHaveTimeoutMgrWithParams(pc, tr.onTimeout, + dontHaveTimeout, maxTimeout, latMultiplier, messageLatencyMultiplier, expProcessTime) + dhtm.Start() + defer dhtm.Shutdown() + + // Add keys + dhtm.AddPending(ks) + + // Wait less than the timeout + time.Sleep(latency - 5*time.Millisecond) + + // Shutdown the manager + dhtm.Shutdown() + + // Wait for the expected timeout + time.Sleep(10 * time.Millisecond) + + // Manager was shut down so timeout should not have fired + if tr.timedOutCount() != 0 { + t.Fatal("expected no timeout after shutdown") + } +} diff --git a/internal/messagequeue/messagequeue.go b/internal/messagequeue/messagequeue.go new file mode 100644 index 0000000000000000000000000000000000000000..fda4ea26c27f92d9190c27a7fa93e1f0ae9f5ce5 --- /dev/null +++ b/internal/messagequeue/messagequeue.go @@ -0,0 +1,826 @@ +package messagequeue + +import ( + "context" + "math" + "sync" + "time" + + bsmsg "gitlab.dms3.io/dms3/go-bitswap/message" + pb "gitlab.dms3.io/dms3/go-bitswap/message/pb" + bsnet "gitlab.dms3.io/dms3/go-bitswap/network" + bswl "gitlab.dms3.io/dms3/go-bitswap/wantlist" + cid "gitlab.dms3.io/dms3/go-cid" + logging "gitlab.dms3.io/dms3/go-log" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" + "gitlab.dms3.io/p2p/go-p2p/p2p/protocol/ping" + "go.uber.org/zap" +) + +var log = logging.Logger("bitswap") +var sflog = log.Desugar() + +const ( + defaultRebroadcastInterval = 30 * time.Second + // maxRetries is the number of times to attempt to send a message before + // giving up + maxRetries = 3 + sendTimeout = 30 * time.Second + // maxMessageSize is the maximum message size in bytes + maxMessageSize = 1024 * 1024 * 2 + // sendErrorBackoff is the time to wait before retrying to connect after + // an error when trying to send a message + sendErrorBackoff = 100 * time.Millisecond + // maxPriority is the max priority as defined by the bitswap protocol + maxPriority = math.MaxInt32 + // sendMessageDebounce is the debounce duration when calling sendMessage() + sendMessageDebounce = time.Millisecond + // when we reach sendMessageCutoff wants/cancels, we'll send the message immediately. + sendMessageCutoff = 256 + // when we debounce for more than sendMessageMaxDelay, we'll send the + // message immediately. + sendMessageMaxDelay = 20 * time.Millisecond + // The maximum amount of time in which to accept a response as being valid + // for latency calculation (as opposed to discarding it as an outlier) + maxValidLatency = 30 * time.Second +) + +// MessageNetwork is any network that can connect peers and generate a message +// sender. +type MessageNetwork interface { + ConnectTo(context.Context, peer.ID) error + NewMessageSender(context.Context, peer.ID, *bsnet.MessageSenderOpts) (bsnet.MessageSender, error) + Latency(peer.ID) time.Duration + Ping(context.Context, peer.ID) ping.Result + Self() peer.ID +} + +// MessageQueue implements queue of want messages to send to peers. +type MessageQueue struct { + ctx context.Context + shutdown func() + p peer.ID + network MessageNetwork + dhTimeoutMgr DontHaveTimeoutManager + + // The maximum size of a message in bytes. 
Any overflow is put into the + // next message + maxMessageSize int + + // The amount of time to wait when there's an error sending to a peer + // before retrying + sendErrorBackoff time.Duration + + // The maximum amount of time in which to accept a response as being valid + // for latency calculation + maxValidLatency time.Duration + + // Signals that there are outgoing wants / cancels ready to be processed + outgoingWork chan time.Time + + // Channel of CIDs of blocks / HAVEs / DONT_HAVEs received from the peer + responses chan []cid.Cid + + // Take lock whenever any of these variables are modified + wllock sync.Mutex + bcstWants recallWantlist + peerWants recallWantlist + cancels *cid.Set + priority int32 + + // Dont touch any of these variables outside of run loop + sender bsnet.MessageSender + rebroadcastIntervalLk sync.RWMutex + rebroadcastInterval time.Duration + rebroadcastTimer *time.Timer + // For performance reasons we just clear out the fields of the message + // instead of creating a new one every time. + msg bsmsg.BitSwapMessage +} + +// recallWantlist keeps a list of pending wants and a list of sent wants +type recallWantlist struct { + // The list of wants that have not yet been sent + pending *bswl.Wantlist + // The list of wants that have been sent + sent *bswl.Wantlist + // The time at which each want was sent + sentAt map[cid.Cid]time.Time +} + +func newRecallWantList() recallWantlist { + return recallWantlist{ + pending: bswl.New(), + sent: bswl.New(), + sentAt: make(map[cid.Cid]time.Time), + } +} + +// Add want to the pending list +func (r *recallWantlist) Add(c cid.Cid, priority int32, wtype pb.Message_Wantlist_WantType) { + r.pending.Add(c, priority, wtype) +} + +// Remove wants from both the pending list and the list of sent wants +func (r *recallWantlist) Remove(c cid.Cid) { + r.pending.Remove(c) + r.sent.Remove(c) + delete(r.sentAt, c) +} + +// Remove wants by type from both the pending list and the list of sent wants +func (r *recallWantlist) RemoveType(c cid.Cid, wtype pb.Message_Wantlist_WantType) { + r.pending.RemoveType(c, wtype) + r.sent.RemoveType(c, wtype) + if _, ok := r.sent.Contains(c); !ok { + delete(r.sentAt, c) + } +} + +// MarkSent moves the want from the pending to the sent list +// +// Returns true if the want was marked as sent. Returns false if the want wasn't +// pending. +func (r *recallWantlist) MarkSent(e bswl.Entry) bool { + if !r.pending.RemoveType(e.Cid, e.WantType) { + return false + } + r.sent.Add(e.Cid, e.Priority, e.WantType) + return true +} + +// SentAt records the time at which a want was sent +func (r *recallWantlist) SentAt(c cid.Cid, at time.Time) { + // The want may have been cancelled in the interim + if _, ok := r.sent.Contains(c); ok { + if _, ok := r.sentAt[c]; !ok { + r.sentAt[c] = at + } + } +} + +// ClearSentAt clears out the record of the time a want was sent. +// We clear the sent at time when we receive a response for a key as we +// only need the first response for latency measurement. 
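+// +// For example (illustrative timeline): a want sent at t0 whose first response +// arrives at t1 contributes a latency of t1-t0; a later response at t2 for the +// same CID is ignored for latency purposes because the sentAt entry has +// already been cleared.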
+func (r *recallWantlist) ClearSentAt(c cid.Cid) { + delete(r.sentAt, c) +} + +type peerConn struct { + p peer.ID + network MessageNetwork +} + +func newPeerConnection(p peer.ID, network MessageNetwork) *peerConn { + return &peerConn{p, network} +} + +func (pc *peerConn) Ping(ctx context.Context) ping.Result { + return pc.network.Ping(ctx, pc.p) +} + +func (pc *peerConn) Latency() time.Duration { + return pc.network.Latency(pc.p) +} + +// Fires when a timeout occurs waiting for a response from a peer running an +// older version of Bitswap that doesn't support DONT_HAVE messages. +type OnDontHaveTimeout func(peer.ID, []cid.Cid) + +// DontHaveTimeoutManager pings a peer to estimate latency so it can set a reasonable +// upper bound on when to consider a DONT_HAVE request as timed out (when connected to +// a peer that doesn't support DONT_HAVE messages) +type DontHaveTimeoutManager interface { + // Start the manager (idempotent) + Start() + // Shutdown the manager (Shutdown is final, manager cannot be restarted) + Shutdown() + // AddPending adds the wants as pending a response. If the are not + // cancelled before the timeout, the OnDontHaveTimeout method will be called. + AddPending([]cid.Cid) + // CancelPending removes the wants + CancelPending([]cid.Cid) + // UpdateMessageLatency informs the manager of a new latency measurement + UpdateMessageLatency(time.Duration) +} + +// New creates a new MessageQueue. +func New(ctx context.Context, p peer.ID, network MessageNetwork, onDontHaveTimeout OnDontHaveTimeout) *MessageQueue { + onTimeout := func(ks []cid.Cid) { + log.Infow("Bitswap: timeout waiting for blocks", "cids", ks, "peer", p) + onDontHaveTimeout(p, ks) + } + dhTimeoutMgr := newDontHaveTimeoutMgr(newPeerConnection(p, network), onTimeout) + return newMessageQueue(ctx, p, network, maxMessageSize, sendErrorBackoff, maxValidLatency, dhTimeoutMgr) +} + +// This constructor is used by the tests +func newMessageQueue( + ctx context.Context, + p peer.ID, + network MessageNetwork, + maxMsgSize int, + sendErrorBackoff time.Duration, + maxValidLatency time.Duration, + dhTimeoutMgr DontHaveTimeoutManager) *MessageQueue { + + ctx, cancel := context.WithCancel(ctx) + return &MessageQueue{ + ctx: ctx, + shutdown: cancel, + p: p, + network: network, + dhTimeoutMgr: dhTimeoutMgr, + maxMessageSize: maxMsgSize, + bcstWants: newRecallWantList(), + peerWants: newRecallWantList(), + cancels: cid.NewSet(), + outgoingWork: make(chan time.Time, 1), + responses: make(chan []cid.Cid, 8), + rebroadcastInterval: defaultRebroadcastInterval, + sendErrorBackoff: sendErrorBackoff, + maxValidLatency: maxValidLatency, + priority: maxPriority, + // For performance reasons we just clear out the fields of the message + // after using it, instead of creating a new one every time. + msg: bsmsg.New(false), + } +} + +// Add want-haves that are part of a broadcast to all connected peers +func (mq *MessageQueue) AddBroadcastWantHaves(wantHaves []cid.Cid) { + if len(wantHaves) == 0 { + return + } + + mq.wllock.Lock() + defer mq.wllock.Unlock() + + for _, c := range wantHaves { + mq.bcstWants.Add(c, mq.priority, pb.Message_Wantlist_Have) + mq.priority-- + + // We're adding a want-have for the cid, so clear any pending cancel + // for the cid + mq.cancels.Remove(c) + } + + // Schedule a message send + mq.signalWorkReady() +} + +// Add want-haves and want-blocks for the peer for this message queue. 
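+// +// Typical call sequence (hedged sketch; peer and network setup are omitted +// and the variable names are assumptions, but the methods are the ones +// defined in this file): +// +//	mq := New(ctx, peerID, network, onDontHaveTimeout) +//	mq.Startup() +//	mq.AddWants(wantBlocks, wantHaves) // queue want-blocks / want-haves for this peer +//	mq.AddCancels(cancelKs)            // cancel wants that are no longer needed +//	mq.Shutdown()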
+func (mq *MessageQueue) AddWants(wantBlocks []cid.Cid, wantHaves []cid.Cid) { + if len(wantBlocks) == 0 && len(wantHaves) == 0 { + return + } + + mq.wllock.Lock() + defer mq.wllock.Unlock() + + for _, c := range wantHaves { + mq.peerWants.Add(c, mq.priority, pb.Message_Wantlist_Have) + mq.priority-- + + // We're adding a want-have for the cid, so clear any pending cancel + // for the cid + mq.cancels.Remove(c) + } + for _, c := range wantBlocks { + mq.peerWants.Add(c, mq.priority, pb.Message_Wantlist_Block) + mq.priority-- + + // We're adding a want-block for the cid, so clear any pending cancel + // for the cid + mq.cancels.Remove(c) + } + + // Schedule a message send + mq.signalWorkReady() +} + +// Add cancel messages for the given keys. +func (mq *MessageQueue) AddCancels(cancelKs []cid.Cid) { + if len(cancelKs) == 0 { + return + } + + // Cancel any outstanding DONT_HAVE timers + mq.dhTimeoutMgr.CancelPending(cancelKs) + + mq.wllock.Lock() + + workReady := false + + // Remove keys from broadcast and peer wants, and add to cancels + for _, c := range cancelKs { + // Check if a want for the key was sent + _, wasSentBcst := mq.bcstWants.sent.Contains(c) + _, wasSentPeer := mq.peerWants.sent.Contains(c) + + // Remove the want from tracking wantlists + mq.bcstWants.Remove(c) + mq.peerWants.Remove(c) + + // Only send a cancel if a want was sent + if wasSentBcst || wasSentPeer { + mq.cancels.Add(c) + workReady = true + } + } + + mq.wllock.Unlock() + + // Unlock first to be nice to the scheduler. + + // Schedule a message send + if workReady { + mq.signalWorkReady() + } +} + +// ResponseReceived is called when a message is received from the network. +// ks is the set of blocks, HAVEs and DONT_HAVEs in the message +// Note that this is just used to calculate latency. +func (mq *MessageQueue) ResponseReceived(ks []cid.Cid) { + if len(ks) == 0 { + return + } + + // These messages are just used to approximate latency, so if we get so + // many responses that they get backed up, just ignore the overflow. + select { + case mq.responses <- ks: + default: + } +} + +// SetRebroadcastInterval sets a new interval on which to rebroadcast the full wantlist +func (mq *MessageQueue) SetRebroadcastInterval(delay time.Duration) { + mq.rebroadcastIntervalLk.Lock() + mq.rebroadcastInterval = delay + if mq.rebroadcastTimer != nil { + mq.rebroadcastTimer.Reset(delay) + } + mq.rebroadcastIntervalLk.Unlock() +} + +// Startup starts the processing of messages and rebroadcasting. +func (mq *MessageQueue) Startup() { + mq.rebroadcastIntervalLk.RLock() + mq.rebroadcastTimer = time.NewTimer(mq.rebroadcastInterval) + mq.rebroadcastIntervalLk.RUnlock() + go mq.runQueue() +} + +// Shutdown stops the processing of messages for a message queue. +func (mq *MessageQueue) Shutdown() { + mq.shutdown() +} + +func (mq *MessageQueue) onShutdown() { + // Shut down the DONT_HAVE timeout manager + mq.dhTimeoutMgr.Shutdown() + + // Reset the streamMessageSender + if mq.sender != nil { + _ = mq.sender.Reset() + } +} + +func (mq *MessageQueue) runQueue() { + defer mq.onShutdown() + + // Create a timer for debouncing scheduled work. + scheduleWork := time.NewTimer(0) + if !scheduleWork.Stop() { + // Need to drain the timer if Stop() returns false + // See: https://golang.org/pkg/time/#Timer.Stop + <-scheduleWork.C + } + + var workScheduled time.Time + for mq.ctx.Err() == nil { + select { + case <-mq.rebroadcastTimer.C: + mq.rebroadcastWantlist() + + case when := <-mq.outgoingWork: + // If we have work scheduled, cancel the timer. 
If we + // don't, record when the work was scheduled. + // We send the time on the channel so we accurately + // track delay. + if workScheduled.IsZero() { + workScheduled = when + } else if !scheduleWork.Stop() { + // Need to drain the timer if Stop() returns false + <-scheduleWork.C + } + + // If we have too many updates and/or we've waited too + // long, send immediately. + if mq.pendingWorkCount() > sendMessageCutoff || + time.Since(workScheduled) >= sendMessageMaxDelay { + mq.sendIfReady() + workScheduled = time.Time{} + } else { + // Otherwise, extend the timer. + scheduleWork.Reset(sendMessageDebounce) + } + + case <-scheduleWork.C: + // We have work scheduled and haven't seen any updates + // in sendMessageDebounce. Send immediately. + workScheduled = time.Time{} + mq.sendIfReady() + + case res := <-mq.responses: + // We received a response from the peer, calculate latency + mq.handleResponse(res) + + case <-mq.ctx.Done(): + return + } + } +} + +// Periodically resend the list of wants to the peer +func (mq *MessageQueue) rebroadcastWantlist() { + mq.rebroadcastIntervalLk.RLock() + mq.rebroadcastTimer.Reset(mq.rebroadcastInterval) + mq.rebroadcastIntervalLk.RUnlock() + + // If some wants were transferred from the rebroadcast list + if mq.transferRebroadcastWants() { + // Send them out + mq.sendMessage() + } +} + +// Transfer wants from the rebroadcast lists into the pending lists. +func (mq *MessageQueue) transferRebroadcastWants() bool { + mq.wllock.Lock() + defer mq.wllock.Unlock() + + // Check if there are any wants to rebroadcast + if mq.bcstWants.sent.Len() == 0 && mq.peerWants.sent.Len() == 0 { + return false + } + + // Copy sent wants into pending wants lists + mq.bcstWants.pending.Absorb(mq.bcstWants.sent) + mq.peerWants.pending.Absorb(mq.peerWants.sent) + + return true +} + +func (mq *MessageQueue) signalWorkReady() { + select { + case mq.outgoingWork <- time.Now(): + default: + } +} + +func (mq *MessageQueue) sendIfReady() { + if mq.hasPendingWork() { + mq.sendMessage() + } +} + +func (mq *MessageQueue) sendMessage() { + sender, err := mq.initializeSender() + if err != nil { + // If we fail to initialize the sender, the networking layer will + // emit a Disconnect event and the MessageQueue will get cleaned up + log.Infof("Could not open message sender to peer %s: %s", mq.p, err) + mq.Shutdown() + return + } + + // Make sure the DONT_HAVE timeout manager has started + // Note: Start is idempotent + mq.dhTimeoutMgr.Start() + + // Convert want lists to a Bitswap Message + message, onSent := mq.extractOutgoingMessage(mq.sender.SupportsHave()) + + // After processing the message, clear out its fields to save memory + defer mq.msg.Reset(false) + + if message.Empty() { + return + } + + wantlist := message.Wantlist() + mq.logOutgoingMessage(wantlist) + + if err := sender.SendMsg(mq.ctx, message); err != nil { + // If the message couldn't be sent, the networking layer will + // emit a Disconnect event and the MessageQueue will get cleaned up + log.Infof("Could not send message to peer %s: %s", mq.p, err) + mq.Shutdown() + return + } + + // Record sent time so as to calculate message latency + onSent() + + // Set a timer to wait for responses + mq.simulateDontHaveWithTimeout(wantlist) + + // If the message was too big and only a subset of wants could be + // sent, schedule sending the rest of the wants in the next + // iteration of the event loop. + if mq.hasPendingWork() { + mq.signalWorkReady() + } +} + +// If want-block times out, simulate a DONT_HAVE reponse. 
+// This is necessary when making requests to peers running an older version of +// Bitswap that doesn't support the DONT_HAVE response, and is also useful to +// mitigate getting blocked by a peer that takes a long time to respond. +func (mq *MessageQueue) simulateDontHaveWithTimeout(wantlist []bsmsg.Entry) { + // Get the CID of each want-block that expects a DONT_HAVE response + wants := make([]cid.Cid, 0, len(wantlist)) + + mq.wllock.Lock() + + for _, entry := range wantlist { + if entry.WantType == pb.Message_Wantlist_Block && entry.SendDontHave { + // Unlikely, but just in case check that the block hasn't been + // received in the interim + c := entry.Cid + if _, ok := mq.peerWants.sent.Contains(c); ok { + wants = append(wants, c) + } + } + } + + mq.wllock.Unlock() + + // Add wants to DONT_HAVE timeout manager + mq.dhTimeoutMgr.AddPending(wants) +} + +// handleResponse is called when a response is received from the peer, +// with the CIDs of received blocks / HAVEs / DONT_HAVEs +func (mq *MessageQueue) handleResponse(ks []cid.Cid) { + now := time.Now() + earliest := time.Time{} + + mq.wllock.Lock() + + // Check if the keys in the response correspond to any request that was + // sent to the peer. + // + // - Find the earliest request so as to calculate the longest latency as + // we want to be conservative when setting the timeout + // - Ignore latencies that are very long, as these are likely to be outliers + // caused when + // - we send a want to peer A + // - peer A does not have the block + // - peer A later receives the block from peer B + // - peer A sends us HAVE / block + for _, c := range ks { + if at, ok := mq.bcstWants.sentAt[c]; ok { + if (earliest.IsZero() || at.Before(earliest)) && now.Sub(at) < mq.maxValidLatency { + earliest = at + } + mq.bcstWants.ClearSentAt(c) + } + if at, ok := mq.peerWants.sentAt[c]; ok { + if (earliest.IsZero() || at.Before(earliest)) && now.Sub(at) < mq.maxValidLatency { + earliest = at + } + // Clear out the sent time for the CID because we only want to + // record the latency between the request and the first response + // for that CID (not subsequent responses) + mq.peerWants.ClearSentAt(c) + } + } + + mq.wllock.Unlock() + + if !earliest.IsZero() { + // Inform the timeout manager of the calculated latency + mq.dhTimeoutMgr.UpdateMessageLatency(now.Sub(earliest)) + } +} + +func (mq *MessageQueue) logOutgoingMessage(wantlist []bsmsg.Entry) { + // Save some CPU cycles and allocations if log level is higher than debug + if ce := sflog.Check(zap.DebugLevel, "sent message"); ce == nil { + return + } + + self := mq.network.Self() + for _, e := range wantlist { + if e.Cancel { + if e.WantType == pb.Message_Wantlist_Have { + log.Debugw("sent message", + "type", "CANCEL_WANT_HAVE", + "cid", e.Cid, + "local", self, + "to", mq.p, + ) + } else { + log.Debugw("sent message", + "type", "CANCEL_WANT_BLOCK", + "cid", e.Cid, + "local", self, + "to", mq.p, + ) + } + } else { + if e.WantType == pb.Message_Wantlist_Have { + log.Debugw("sent message", + "type", "WANT_HAVE", + "cid", e.Cid, + "local", self, + "to", mq.p, + ) + } else { + log.Debugw("sent message", + "type", "WANT_BLOCK", + "cid", e.Cid, + "local", self, + "to", mq.p, + ) + } + } + } +} + +// Whether there is work to be processed +func (mq *MessageQueue) hasPendingWork() bool { + return mq.pendingWorkCount() > 0 +} + +// The amount of work that is waiting to be processed +func (mq *MessageQueue) pendingWorkCount() int { + mq.wllock.Lock() + defer mq.wllock.Unlock() + + return 
mq.bcstWants.pending.Len() + mq.peerWants.pending.Len() + mq.cancels.Len() +} + +// Convert the lists of wants into a Bitswap message +func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwapMessage, func()) { +	// Get broadcast and regular wantlist entries. +	mq.wllock.Lock() +	peerEntries := mq.peerWants.pending.Entries() +	bcstEntries := mq.bcstWants.pending.Entries() +	cancels := mq.cancels.Keys() +	if !supportsHave { +		filteredPeerEntries := peerEntries[:0] +		// If the remote peer doesn't support HAVE / DONT_HAVE messages, +		// don't send want-haves (only send want-blocks) +		// +		// Doing this here under the lock makes everything else in this +		// function simpler. +		// +		// TODO: We should _try_ to avoid recording these in the first +		// place if possible. +		for _, e := range peerEntries { +			if e.WantType == pb.Message_Wantlist_Have { +				mq.peerWants.RemoveType(e.Cid, pb.Message_Wantlist_Have) +			} else { +				filteredPeerEntries = append(filteredPeerEntries, e) +			} +		} +		peerEntries = filteredPeerEntries +	} +	mq.wllock.Unlock() + +	// We prioritize cancels, then regular wants, then broadcast wants. + +	var ( +		msgSize         = 0 // size of message so far +		sentCancels     = 0 // number of cancels in message +		sentPeerEntries = 0 // number of peer entries in message +		sentBcstEntries = 0 // number of broadcast entries in message +	) + +	// Add each cancel to the message +	for _, c := range cancels { +		msgSize += mq.msg.Cancel(c) +		sentCancels++ + +		if msgSize >= mq.maxMessageSize { +			goto FINISH +		} +	} + +	// Next, add the wants. If we have too many entries to fit into a single +	// message, sort by priority and include the high priority ones first. +	// However, avoid sorting till we really need to as this code is +	// called frequently. + +	// Add each regular want-have / want-block to the message. +	if msgSize+(len(peerEntries)*bsmsg.MaxEntrySize) > mq.maxMessageSize { +		bswl.SortEntries(peerEntries) +	} + +	for _, e := range peerEntries { +		msgSize += mq.msg.AddEntry(e.Cid, e.Priority, e.WantType, true) +		sentPeerEntries++ + +		if msgSize >= mq.maxMessageSize { +			goto FINISH +		} +	} + +	// Add each broadcast want-have to the message. +	if msgSize+(len(bcstEntries)*bsmsg.MaxEntrySize) > mq.maxMessageSize { +		bswl.SortEntries(bcstEntries) +	} + +	// Add each broadcast want-have to the message +	for _, e := range bcstEntries { +		// Broadcast wants are sent as want-have +		wantType := pb.Message_Wantlist_Have + +		// If the remote peer doesn't support HAVE / DONT_HAVE messages, +		// send a want-block instead +		if !supportsHave { +			wantType = pb.Message_Wantlist_Block +		} + +		msgSize += mq.msg.AddEntry(e.Cid, e.Priority, wantType, false) +		sentBcstEntries++ + +		if msgSize >= mq.maxMessageSize { +			goto FINISH +		} +	} + +FINISH: + +	// Finally, re-take the lock, mark sent and remove any entries from our +	// message that we've decided to cancel at the last minute. +	mq.wllock.Lock() +	for i, e := range peerEntries[:sentPeerEntries] { +		if !mq.peerWants.MarkSent(e) { +			// It changed.
+ mq.msg.Remove(e.Cid) + peerEntries[i].Cid = cid.Undef + } + } + + for i, e := range bcstEntries[:sentBcstEntries] { + if !mq.bcstWants.MarkSent(e) { + mq.msg.Remove(e.Cid) + bcstEntries[i].Cid = cid.Undef + } + } + + for _, c := range cancels[:sentCancels] { + if !mq.cancels.Has(c) { + mq.msg.Remove(c) + } else { + mq.cancels.Remove(c) + } + } + mq.wllock.Unlock() + + // When the message has been sent, record the time at which each want was + // sent so we can calculate message latency + onSent := func() { + now := time.Now() + + mq.wllock.Lock() + defer mq.wllock.Unlock() + + for _, e := range peerEntries[:sentPeerEntries] { + if e.Cid.Defined() { // Check if want was cancelled in the interim + mq.peerWants.SentAt(e.Cid, now) + } + } + + for _, e := range bcstEntries[:sentBcstEntries] { + if e.Cid.Defined() { // Check if want was cancelled in the interim + mq.bcstWants.SentAt(e.Cid, now) + } + } + } + + return mq.msg, onSent +} + +func (mq *MessageQueue) initializeSender() (bsnet.MessageSender, error) { + if mq.sender == nil { + opts := &bsnet.MessageSenderOpts{ + MaxRetries: maxRetries, + SendTimeout: sendTimeout, + SendErrorBackoff: sendErrorBackoff, + } + nsender, err := mq.network.NewMessageSender(mq.ctx, mq.p, opts) + if err != nil { + return nil, err + } + + mq.sender = nsender + } + return mq.sender, nil +} diff --git a/internal/messagequeue/messagequeue_test.go b/internal/messagequeue/messagequeue_test.go new file mode 100644 index 0000000000000000000000000000000000000000..999a819cd266ef112e1f464b592fb1401e172058 --- /dev/null +++ b/internal/messagequeue/messagequeue_test.go @@ -0,0 +1,797 @@ +package messagequeue + +import ( + "context" + "fmt" + "math" + "math/rand" + "sync" + "testing" + "time" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + pb "gitlab.dms3.io/dms3/go-bitswap/message/pb" + cid "gitlab.dms3.io/dms3/go-cid" + + bsmsg "gitlab.dms3.io/dms3/go-bitswap/message" + bsnet "gitlab.dms3.io/dms3/go-bitswap/network" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" + "gitlab.dms3.io/p2p/go-p2p/p2p/protocol/ping" +) + +type fakeMessageNetwork struct { + connectError error + messageSenderError error + messageSender bsnet.MessageSender +} + +func (fmn *fakeMessageNetwork) ConnectTo(context.Context, peer.ID) error { + return fmn.connectError +} + +func (fmn *fakeMessageNetwork) NewMessageSender(context.Context, peer.ID, *bsnet.MessageSenderOpts) (bsnet.MessageSender, error) { + if fmn.messageSenderError == nil { + return fmn.messageSender, nil + } + return nil, fmn.messageSenderError +} + +func (fms *fakeMessageNetwork) Self() peer.ID { return "" } +func (fms *fakeMessageNetwork) Latency(peer.ID) time.Duration { return 0 } +func (fms *fakeMessageNetwork) Ping(context.Context, peer.ID) ping.Result { + return ping.Result{Error: fmt.Errorf("ping error")} +} + +type fakeDontHaveTimeoutMgr struct { + lk sync.Mutex + ks []cid.Cid + latencyUpds []time.Duration +} + +func (fp *fakeDontHaveTimeoutMgr) Start() {} +func (fp *fakeDontHaveTimeoutMgr) Shutdown() {} +func (fp *fakeDontHaveTimeoutMgr) AddPending(ks []cid.Cid) { + fp.lk.Lock() + defer fp.lk.Unlock() + + s := cid.NewSet() + for _, c := range append(fp.ks, ks...) 
{ + s.Add(c) + } + fp.ks = s.Keys() +} +func (fp *fakeDontHaveTimeoutMgr) CancelPending(ks []cid.Cid) { + fp.lk.Lock() + defer fp.lk.Unlock() + + s := cid.NewSet() + for _, c := range fp.ks { + s.Add(c) + } + for _, c := range ks { + s.Remove(c) + } + fp.ks = s.Keys() +} +func (fp *fakeDontHaveTimeoutMgr) UpdateMessageLatency(elapsed time.Duration) { + fp.lk.Lock() + defer fp.lk.Unlock() + + fp.latencyUpds = append(fp.latencyUpds, elapsed) +} +func (fp *fakeDontHaveTimeoutMgr) latencyUpdates() []time.Duration { + fp.lk.Lock() + defer fp.lk.Unlock() + + return fp.latencyUpds +} +func (fp *fakeDontHaveTimeoutMgr) pendingCount() int { + fp.lk.Lock() + defer fp.lk.Unlock() + + return len(fp.ks) +} + +type fakeMessageSender struct { + lk sync.Mutex + reset chan<- struct{} + messagesSent chan<- []bsmsg.Entry + supportsHave bool +} + +func newFakeMessageSender(reset chan<- struct{}, + messagesSent chan<- []bsmsg.Entry, supportsHave bool) *fakeMessageSender { + + return &fakeMessageSender{ + reset: reset, + messagesSent: messagesSent, + supportsHave: supportsHave, + } +} + +func (fms *fakeMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { + fms.lk.Lock() + defer fms.lk.Unlock() + + fms.messagesSent <- msg.Wantlist() + return nil +} +func (fms *fakeMessageSender) Close() error { return nil } +func (fms *fakeMessageSender) Reset() error { fms.reset <- struct{}{}; return nil } +func (fms *fakeMessageSender) SupportsHave() bool { return fms.supportsHave } + +func mockTimeoutCb(peer.ID, []cid.Cid) {} + +func collectMessages(ctx context.Context, + t *testing.T, + messagesSent <-chan []bsmsg.Entry, + timeout time.Duration) [][]bsmsg.Entry { + var messagesReceived [][]bsmsg.Entry + timeoutctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + for { + select { + case messageReceived := <-messagesSent: + messagesReceived = append(messagesReceived, messageReceived) + case <-timeoutctx.Done(): + return messagesReceived + } + } +} + +func totalEntriesLength(messages [][]bsmsg.Entry) int { + totalLength := 0 + for _, m := range messages { + totalLength += len(m) + } + return totalLength +} + +func TestStartupAndShutdown(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + bcstwh := testutil.GenerateCids(10) + + messageQueue.Startup() + messageQueue.AddBroadcastWantHaves(bcstwh) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + if len(messages) != 1 { + t.Fatal("wrong number of messages were sent for broadcast want-haves") + } + + firstMessage := messages[0] + if len(firstMessage) != len(bcstwh) { + t.Fatal("did not add all wants to want list") + } + for _, entry := range firstMessage { + if entry.Cancel { + t.Fatal("initial add sent cancel entry when it should not have") + } + } + + messageQueue.Shutdown() + + timeoutctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer cancel() + select { + case <-resetChan: + case <-timeoutctx.Done(): + t.Fatal("message sender should have been reset but wasn't") + } +} + +func TestSendingMessagesDeduped(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet 
:= &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + wantHaves := testutil.GenerateCids(10) + wantBlocks := testutil.GenerateCids(10) + + messageQueue.Startup() + messageQueue.AddWants(wantBlocks, wantHaves) + messageQueue.AddWants(wantBlocks, wantHaves) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { + t.Fatal("Messages were not deduped") + } +} + +func TestSendingMessagesPartialDupe(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + wantHaves := testutil.GenerateCids(10) + wantBlocks := testutil.GenerateCids(10) + + messageQueue.Startup() + messageQueue.AddWants(wantBlocks[:8], wantHaves[:8]) + messageQueue.AddWants(wantBlocks[3:], wantHaves[3:]) + messages := collectMessages(ctx, t, messagesSent, 20*time.Millisecond) + + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { + t.Fatal("messages were not correctly deduped") + } +} + +func TestSendingMessagesPriority(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + wantHaves1 := testutil.GenerateCids(5) + wantHaves2 := testutil.GenerateCids(5) + wantHaves := append(wantHaves1, wantHaves2...) + wantBlocks1 := testutil.GenerateCids(5) + wantBlocks2 := testutil.GenerateCids(5) + wantBlocks := append(wantBlocks1, wantBlocks2...) 
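+	// Note: AddWants assigns priorities by decrementing mq.priority for each +	// want it adds, so wants added earlier get numerically higher priorities; +	// the ordering checks below depend on that.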
+ + messageQueue.Startup() + messageQueue.AddWants(wantBlocks1, wantHaves1) + messageQueue.AddWants(wantBlocks2, wantHaves2) + messages := collectMessages(ctx, t, messagesSent, 20*time.Millisecond) + + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks) { + t.Fatal("wrong number of wants") + } + byCid := make(map[cid.Cid]bsmsg.Entry) + for _, entry := range messages[0] { + byCid[entry.Cid] = entry + } + + // Check that earliest want-haves have highest priority + for i := range wantHaves { + if i > 0 { + if byCid[wantHaves[i]].Priority > byCid[wantHaves[i-1]].Priority { + t.Fatal("earliest want-haves should have higher priority") + } + } + } + + // Check that earliest want-blocks have highest priority + for i := range wantBlocks { + if i > 0 { + if byCid[wantBlocks[i]].Priority > byCid[wantBlocks[i-1]].Priority { + t.Fatal("earliest want-blocks should have higher priority") + } + } + } + + // Check that want-haves have higher priority than want-blocks within + // same group + for i := range wantHaves1 { + if i > 0 { + if byCid[wantHaves[i]].Priority <= byCid[wantBlocks[0]].Priority { + t.Fatal("want-haves should have higher priority than want-blocks") + } + } + } + + // Check that all items in first group have higher priority than first item + // in second group + for i := range wantHaves1 { + if i > 0 { + if byCid[wantHaves[i]].Priority <= byCid[wantHaves2[0]].Priority { + t.Fatal("items in first group should have higher priority than items in second group") + } + } + } +} + +func TestCancelOverridesPendingWants(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + + wantHaves := testutil.GenerateCids(2) + wantBlocks := testutil.GenerateCids(2) + cancels := []cid.Cid{wantBlocks[0], wantHaves[0]} + + messageQueue.Startup() + messageQueue.AddWants(wantBlocks, wantHaves) + messageQueue.AddCancels(cancels) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + if totalEntriesLength(messages) != len(wantHaves)+len(wantBlocks)-len(cancels) { + t.Fatal("Wrong message count") + } + + // Cancelled 1 want-block and 1 want-have before they were sent + // so that leaves 1 want-block and 1 want-have + wb, wh, cl := filterWantTypes(messages[0]) + if len(wb) != 1 || !wb[0].Equals(wantBlocks[1]) { + t.Fatal("Expected 1 want-block") + } + if len(wh) != 1 || !wh[0].Equals(wantHaves[1]) { + t.Fatal("Expected 1 want-have") + } + // Cancelled wants before they were sent, so no cancel should be sent + // to the network + if len(cl) != 0 { + t.Fatal("Expected no cancels") + } + + // Cancel the remaining want-blocks and want-haves + cancels = append(wantHaves, wantBlocks...) 
+ messageQueue.AddCancels(cancels) + messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + // The remaining 2 cancels should be sent to the network as they are for + // wants that were sent to the network + _, _, cl = filterWantTypes(messages[0]) + if len(cl) != 2 { + t.Fatal("Expected 2 cancels") + } +} + +func TestWantOverridesPendingCancels(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + + cids := testutil.GenerateCids(3) + wantBlocks := cids[:1] + wantHaves := cids[1:] + + messageQueue.Startup() + + // Add 1 want-block and 2 want-haves + messageQueue.AddWants(wantBlocks, wantHaves) + + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + if totalEntriesLength(messages) != len(wantBlocks)+len(wantHaves) { + t.Fatal("Wrong message count", totalEntriesLength(messages)) + } + + // Cancel existing wants + messageQueue.AddCancels(cids) + // Override one cancel with a want-block (before cancel is sent to network) + messageQueue.AddWants(cids[:1], []cid.Cid{}) + + messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + if totalEntriesLength(messages) != 3 { + t.Fatal("Wrong message count", totalEntriesLength(messages)) + } + + // Should send 1 want-block and 2 cancels + wb, wh, cl := filterWantTypes(messages[0]) + if len(wb) != 1 { + t.Fatal("Expected 1 want-block") + } + if len(wh) != 0 { + t.Fatal("Expected 0 want-have") + } + if len(cl) != 2 { + t.Fatal("Expected 2 cancels") + } +} + +func TestWantlistRebroadcast(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + bcstwh := testutil.GenerateCids(10) + wantHaves := testutil.GenerateCids(10) + wantBlocks := testutil.GenerateCids(10) + + // Add some broadcast want-haves + messageQueue.Startup() + messageQueue.AddBroadcastWantHaves(bcstwh) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + if len(messages) != 1 { + t.Fatal("wrong number of messages were sent for initial wants") + } + + // All broadcast want-haves should have been sent + firstMessage := messages[0] + if len(firstMessage) != len(bcstwh) { + t.Fatal("wrong number of wants") + } + + // Tell message queue to rebroadcast after 5ms, then wait 8ms + messageQueue.SetRebroadcastInterval(5 * time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, 8*time.Millisecond) + if len(messages) != 1 { + t.Fatal("wrong number of messages were rebroadcast") + } + + // All the want-haves should have been rebroadcast + firstMessage = messages[0] + if len(firstMessage) != len(bcstwh) { + t.Fatal("did not rebroadcast all wants") + } + + // Tell message queue to rebroadcast after a long time (so it doesn't + // interfere with the next message collection), then send out some + // regular wants and collect them + messageQueue.SetRebroadcastInterval(1 * time.Second) + messageQueue.AddWants(wantBlocks, wantHaves) + messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + if len(messages) != 1 { + t.Fatal("wrong 
number of messages were rebroadcast") + } + + // All new wants should have been sent + firstMessage = messages[0] + if len(firstMessage) != len(wantHaves)+len(wantBlocks) { + t.Fatal("wrong number of wants") + } + + // Tell message queue to rebroadcast after 10ms, then wait 15ms + messageQueue.SetRebroadcastInterval(10 * time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, 15*time.Millisecond) + firstMessage = messages[0] + + // Both original and new wants should have been rebroadcast + totalWants := len(bcstwh) + len(wantHaves) + len(wantBlocks) + if len(firstMessage) != totalWants { + t.Fatal("did not rebroadcast all wants") + } + + // Cancel some of the wants + messageQueue.SetRebroadcastInterval(1 * time.Second) + cancels := append([]cid.Cid{bcstwh[0]}, wantHaves[0], wantBlocks[0]) + messageQueue.AddCancels(cancels) + messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + if len(messages) != 1 { + t.Fatal("wrong number of messages were rebroadcast") + } + + // Cancels for each want should have been sent + firstMessage = messages[0] + if len(firstMessage) != len(cancels) { + t.Fatal("wrong number of cancels") + } + for _, entry := range firstMessage { + if !entry.Cancel { + t.Fatal("expected cancels") + } + } + + // Tell message queue to rebroadcast after 10ms, then wait 15ms + messageQueue.SetRebroadcastInterval(10 * time.Millisecond) + messages = collectMessages(ctx, t, messagesSent, 15*time.Millisecond) + firstMessage = messages[0] + if len(firstMessage) != totalWants-len(cancels) { + t.Fatal("did not rebroadcast all wants") + } +} + +func TestSendingLargeMessages(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + dhtm := &fakeDontHaveTimeoutMgr{} + peerID := testutil.GeneratePeers(1)[0] + + wantBlocks := testutil.GenerateCids(10) + entrySize := 44 + maxMsgSize := entrySize * 3 // 3 wants + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMsgSize, sendErrorBackoff, maxValidLatency, dhtm) + + messageQueue.Startup() + messageQueue.AddWants(wantBlocks, []cid.Cid{}) + messages := collectMessages(ctx, t, messagesSent, 100*time.Millisecond) + + // want-block has size 44, so with maxMsgSize 44 * 3 (3 want-blocks), then if + // we send 10 want-blocks we should expect 4 messages: + // [***] [***] [***] [*] + if len(messages) != 4 { + t.Fatal("expected 4 messages to be sent, got", len(messages)) + } + if totalEntriesLength(messages) != len(wantBlocks) { + t.Fatal("wrong number of wants") + } +} + +func TestSendToPeerThatDoesntSupportHave(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, false) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + messageQueue := New(ctx, peerID, fakenet, mockTimeoutCb) + messageQueue.Startup() + + // If the remote peer doesn't support HAVE / DONT_HAVE messages + // - want-blocks should be sent normally + // - want-haves should not be sent + // - broadcast want-haves should be sent as want-blocks + + // Check broadcast want-haves + bcwh := testutil.GenerateCids(10) + messageQueue.AddBroadcastWantHaves(bcwh) + messages := collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + if len(messages) != 1 { + t.Fatal("wrong number of messages 
were sent", len(messages)) + } + wl := messages[0] + if len(wl) != len(bcwh) { + t.Fatal("wrong number of entries in wantlist", len(wl)) + } + for _, entry := range wl { + if entry.WantType != pb.Message_Wantlist_Block { + t.Fatal("broadcast want-haves should be sent as want-blocks") + } + } + + // Check regular want-haves and want-blocks + wbs := testutil.GenerateCids(10) + whs := testutil.GenerateCids(10) + messageQueue.AddWants(wbs, whs) + messages = collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + if len(messages) != 1 { + t.Fatal("wrong number of messages were sent", len(messages)) + } + wl = messages[0] + if len(wl) != len(wbs) { + t.Fatal("should only send want-blocks (no want-haves)", len(wl)) + } + for _, entry := range wl { + if entry.WantType != pb.Message_Wantlist_Block { + t.Fatal("should only send want-blocks") + } + } +} + +func TestSendToPeerThatDoesntSupportHaveMonitorsTimeouts(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, false) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + dhtm := &fakeDontHaveTimeoutMgr{} + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm) + messageQueue.Startup() + + wbs := testutil.GenerateCids(10) + messageQueue.AddWants(wbs, nil) + collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + // Check want-blocks are added to DontHaveTimeoutMgr + if dhtm.pendingCount() != len(wbs) { + t.Fatal("want-blocks not added to DontHaveTimeoutMgr") + } + + cancelCount := 2 + messageQueue.AddCancels(wbs[:cancelCount]) + collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + // Check want-blocks are removed from DontHaveTimeoutMgr + if dhtm.pendingCount() != len(wbs)-cancelCount { + t.Fatal("want-blocks not removed from DontHaveTimeoutMgr") + } +} + +func TestResponseReceived(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, false) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + dhtm := &fakeDontHaveTimeoutMgr{} + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm) + messageQueue.Startup() + + cids := testutil.GenerateCids(10) + + // Add some wants and wait 10ms + messageQueue.AddWants(cids[:5], nil) + collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + // Add some wants and wait another 10ms + messageQueue.AddWants(cids[5:8], nil) + collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + // Receive a response for some of the wants from both groups + messageQueue.ResponseReceived([]cid.Cid{cids[0], cids[6], cids[9]}) + + // Wait a short time for processing + time.Sleep(10 * time.Millisecond) + + // Check that message queue informs DHTM of received responses + upds := dhtm.latencyUpdates() + if len(upds) != 1 { + t.Fatal("expected one latency update") + } + // Elapsed time should be between when the first want was sent and the + // response received (about 20ms) + if upds[0] < 15*time.Millisecond || upds[0] > 25*time.Millisecond { + t.Fatal("expected latency to be time since oldest message sent") + } +} + +func TestResponseReceivedAppliesForFirstResponseOnly(t *testing.T) { + ctx := context.Background() + messagesSent := 
make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, false) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + dhtm := &fakeDontHaveTimeoutMgr{} + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm) + messageQueue.Startup() + + cids := testutil.GenerateCids(2) + + // Add some wants and wait 10ms + messageQueue.AddWants(cids, nil) + collectMessages(ctx, t, messagesSent, 10*time.Millisecond) + + // Receive a response for the wants + messageQueue.ResponseReceived(cids) + + // Wait another 10ms + time.Sleep(10 * time.Millisecond) + + // Message queue should inform DHTM of first response + upds := dhtm.latencyUpdates() + if len(upds) != 1 { + t.Fatal("expected one latency update") + } + + // Receive a second response for the same wants + messageQueue.ResponseReceived(cids) + + // Wait for the response to be processed by the message queue + time.Sleep(10 * time.Millisecond) + + // Message queue should not inform DHTM of second response because the + // CIDs are a subset of the first response + upds = dhtm.latencyUpdates() + if len(upds) != 1 { + t.Fatal("expected one latency update") + } +} + +func TestResponseReceivedDiscardsOutliers(t *testing.T) { + ctx := context.Background() + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, false) + fakenet := &fakeMessageNetwork{nil, nil, fakeSender} + peerID := testutil.GeneratePeers(1)[0] + + maxValLatency := 30 * time.Millisecond + dhtm := &fakeDontHaveTimeoutMgr{} + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValLatency, dhtm) + messageQueue.Startup() + + cids := testutil.GenerateCids(4) + + // Add some wants and wait 20ms + messageQueue.AddWants(cids[:2], nil) + collectMessages(ctx, t, messagesSent, 20*time.Millisecond) + + // Add some more wants and wait long enough that the first wants will be + // outside the maximum valid latency, but the second wants will be inside + messageQueue.AddWants(cids[2:], nil) + collectMessages(ctx, t, messagesSent, maxValLatency-10*time.Millisecond) + + // Receive a response for the wants + messageQueue.ResponseReceived(cids) + + // Wait for the response to be processed by the message queue + time.Sleep(10 * time.Millisecond) + + // Check that the latency calculation excludes the first wants + // (because they're older than max valid latency) + upds := dhtm.latencyUpdates() + if len(upds) != 1 { + t.Fatal("expected one latency update") + } + // Elapsed time should not include outliers + if upds[0] > maxValLatency { + t.Fatal("expected latency calculation to discard outliers") + } +} + +func filterWantTypes(wantlist []bsmsg.Entry) ([]cid.Cid, []cid.Cid, []cid.Cid) { + var wbs []cid.Cid + var whs []cid.Cid + var cls []cid.Cid + for _, e := range wantlist { + if e.Cancel { + cls = append(cls, e.Cid) + } else if e.WantType == pb.Message_Wantlist_Block { + wbs = append(wbs, e.Cid) + } else { + whs = append(whs, e.Cid) + } + } + return wbs, whs, cls +} + +// Simplistic benchmark to allow us to simulate conditions on the gateways +func BenchmarkMessageQueue(b *testing.B) { + ctx := context.Background() + + createQueue := func() *MessageQueue { + messagesSent := make(chan []bsmsg.Entry) + resetChan := make(chan struct{}, 1) + fakeSender := newFakeMessageSender(resetChan, messagesSent, true) + fakenet := 
&fakeMessageNetwork{nil, nil, fakeSender} + dhtm := &fakeDontHaveTimeoutMgr{} + peerID := testutil.GeneratePeers(1)[0] + + messageQueue := newMessageQueue(ctx, peerID, fakenet, maxMessageSize, sendErrorBackoff, maxValidLatency, dhtm) + messageQueue.Startup() + + go func() { + for { + <-messagesSent + time.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond) + } + }() + + return messageQueue + } + + // Create a handful of message queues to start with + var qs []*MessageQueue + for i := 0; i < 5; i++ { + qs = append(qs, createQueue()) + } + + for n := 0; n < b.N; n++ { + // Create a new message queue every 10 ticks + if n%10 == 0 { + qs = append(qs, createQueue()) + } + + // Pick a random message queue, favoring those created later + qn := len(qs) + i := int(math.Floor(float64(qn) * float64(1-rand.Float32()*rand.Float32()))) + if i >= qn { // because of floating point math + i = qn - 1 + } + + // Alternately add either a few wants or a lot of broadcast wants + if rand.Intn(2) == 0 { + wants := testutil.GenerateCids(10) + qs[i].AddWants(wants[:2], wants[2:]) + } else { + wants := testutil.GenerateCids(60) + qs[i].AddBroadcastWantHaves(wants) + } + } +} diff --git a/internal/notifications/notifications.go b/internal/notifications/notifications.go new file mode 100644 index 0000000000000000000000000000000000000000..35958614089fe148167ceacaa5a2f9ce2db3bd0d --- /dev/null +++ b/internal/notifications/notifications.go @@ -0,0 +1,137 @@ +package notifications + +import ( + "context" + "sync" + + pubsub "github.com/cskr/pubsub" + blocks "gitlab.dms3.io/dms3/go-block-format" + cid "gitlab.dms3.io/dms3/go-cid" +) + +const bufferSize = 16 + +// PubSub is a simple interface for publishing blocks and being able to subscribe +// for cids. It's used internally by bitswap to decouple receiving blocks +// and actually providing them back to the GetBlocks caller. +type PubSub interface { + Publish(block blocks.Block) + Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block + Shutdown() +} + +// New generates a new PubSub interface. +func New() PubSub { + return &impl{ + wrapped: *pubsub.New(bufferSize), + closed: make(chan struct{}), + } +} + +type impl struct { + lk sync.RWMutex + wrapped pubsub.PubSub + + closed chan struct{} +} + +func (ps *impl) Publish(block blocks.Block) { + ps.lk.RLock() + defer ps.lk.RUnlock() + select { + case <-ps.closed: + return + default: + } + + ps.wrapped.Pub(block, block.Cid().KeyString()) +} + +func (ps *impl) Shutdown() { + ps.lk.Lock() + defer ps.lk.Unlock() + select { + case <-ps.closed: + return + default: + } + close(ps.closed) + ps.wrapped.Shutdown() +} + +// Subscribe returns a channel of blocks for the given |keys|. |blockChannel| +// is closed if the |ctx| times out or is cancelled, or after receiving the blocks +// corresponding to |keys|. +func (ps *impl) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block { + + blocksCh := make(chan blocks.Block, len(keys)) + valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking + if len(keys) == 0 { + close(blocksCh) + return blocksCh + } + + // prevent shutdown + ps.lk.RLock() + defer ps.lk.RUnlock() + + select { + case <-ps.closed: + close(blocksCh) + return blocksCh + default: + } + + // AddSubOnceEach listens for each key in the list, and closes the channel + // once all keys have been received + ps.wrapped.AddSubOnceEach(valuesCh, toStrings(keys)...) 
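+ // Forward incoming values to the caller's block channel in a goroutine, + // unsubscribing when the context is cancelled, the PubSub is shut down, or + // all of the keys have been received.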
+ go func() { + defer func() { + close(blocksCh) + + ps.lk.RLock() + defer ps.lk.RUnlock() + // Don't touch the pubsub instance if we're + // already closed. + select { + case <-ps.closed: + return + default: + } + + ps.wrapped.Unsub(valuesCh) + }() + + for { + select { + case <-ctx.Done(): + return + case <-ps.closed: + case val, ok := <-valuesCh: + if !ok { + return + } + block, ok := val.(blocks.Block) + if !ok { + return + } + select { + case <-ctx.Done(): + return + case blocksCh <- block: // continue + case <-ps.closed: + } + } + } + }() + + return blocksCh +} + +func toStrings(keys []cid.Cid) []string { + strs := make([]string, 0, len(keys)) + for _, key := range keys { + strs = append(strs, key.KeyString()) + } + return strs +} diff --git a/internal/notifications/notifications_test.go b/internal/notifications/notifications_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0f0ec8dd2c9e122450cf930ace15859a74c27137 --- /dev/null +++ b/internal/notifications/notifications_test.go @@ -0,0 +1,187 @@ +package notifications + +import ( + "bytes" + "context" + "testing" + "time" + + blocks "gitlab.dms3.io/dms3/go-block-format" + cid "gitlab.dms3.io/dms3/go-cid" + blocksutil "gitlab.dms3.io/dms3/go-dms3-blocksutil" +) + +func TestDuplicates(t *testing.T) { + b1 := blocks.NewBlock([]byte("1")) + b2 := blocks.NewBlock([]byte("2")) + + n := New() + defer n.Shutdown() + ch := n.Subscribe(context.Background(), b1.Cid(), b2.Cid()) + + n.Publish(b1) + blockRecvd, ok := <-ch + if !ok { + t.Fail() + } + assertBlocksEqual(t, b1, blockRecvd) + + n.Publish(b1) // ignored duplicate + + n.Publish(b2) + blockRecvd, ok = <-ch + if !ok { + t.Fail() + } + assertBlocksEqual(t, b2, blockRecvd) +} + +func TestPublishSubscribe(t *testing.T) { + blockSent := blocks.NewBlock([]byte("Greetings from The Interval")) + + n := New() + defer n.Shutdown() + ch := n.Subscribe(context.Background(), blockSent.Cid()) + + n.Publish(blockSent) + blockRecvd, ok := <-ch + if !ok { + t.Fail() + } + + assertBlocksEqual(t, blockRecvd, blockSent) + +} + +func TestSubscribeMany(t *testing.T) { + e1 := blocks.NewBlock([]byte("1")) + e2 := blocks.NewBlock([]byte("2")) + + n := New() + defer n.Shutdown() + ch := n.Subscribe(context.Background(), e1.Cid(), e2.Cid()) + + n.Publish(e1) + r1, ok := <-ch + if !ok { + t.Fatal("didn't receive first expected block") + } + assertBlocksEqual(t, e1, r1) + + n.Publish(e2) + r2, ok := <-ch + if !ok { + t.Fatal("didn't receive second expected block") + } + assertBlocksEqual(t, e2, r2) +} + +// TestDuplicateSubscribe tests a scenario where a given block +// would be requested twice at the same time. 
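+// Both subscriptions should independently receive the published block.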
+func TestDuplicateSubscribe(t *testing.T) { + e1 := blocks.NewBlock([]byte("1")) + + n := New() + defer n.Shutdown() + ch1 := n.Subscribe(context.Background(), e1.Cid()) + ch2 := n.Subscribe(context.Background(), e1.Cid()) + + n.Publish(e1) + r1, ok := <-ch1 + if !ok { + t.Fatal("didn't receive first expected block") + } + assertBlocksEqual(t, e1, r1) + + r2, ok := <-ch2 + if !ok { + t.Fatal("didn't receive second expected block") + } + assertBlocksEqual(t, e1, r2) +} + +func TestShutdownBeforeUnsubscribe(t *testing.T) { + e1 := blocks.NewBlock([]byte("1")) + + n := New() + ctx, cancel := context.WithCancel(context.Background()) + ch := n.Subscribe(ctx, e1.Cid()) // subscribe for a key, then shut down before it is received + n.Shutdown() + cancel() + + select { + case _, ok := <-ch: + if ok { + t.Fatal("channel should have been closed") + } + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for channel to be closed") + } +} + +func TestSubscribeIsANoopWhenCalledWithNoKeys(t *testing.T) { + n := New() + defer n.Shutdown() + ch := n.Subscribe(context.Background()) // no keys provided + if _, ok := <-ch; ok { + t.Fatal("should be closed if no keys provided") + } +} + +func TestCarryOnWhenDeadlineExpires(t *testing.T) { + + impossibleDeadline := time.Nanosecond + fastExpiringCtx, cancel := context.WithTimeout(context.Background(), impossibleDeadline) + defer cancel() + + n := New() + defer n.Shutdown() + block := blocks.NewBlock([]byte("A Missed Connection")) + blockChannel := n.Subscribe(fastExpiringCtx, block.Cid()) + + assertBlockChannelNil(t, blockChannel) +} + +func TestDoesNotDeadLockIfContextCancelledBeforePublish(t *testing.T) { + + g := blocksutil.NewBlockGenerator() + ctx, cancel := context.WithCancel(context.Background()) + n := New() + defer n.Shutdown() + + t.Log("generate a large number of blocks. exceed default buffer") + bs := g.Blocks(1000) + ks := func() []cid.Cid { + var keys []cid.Cid + for _, b := range bs { + keys = append(keys, b.Cid()) + } + return keys + }() + + _ = n.Subscribe(ctx, ks...) // ignore received channel + + t.Log("cancel context before any blocks published") + cancel() + for _, b := range bs { + n.Publish(b) + } + + t.Log("publishing the large number of blocks to the ignored channel must not deadlock") +} + +func assertBlockChannelNil(t *testing.T, blockChannel <-chan blocks.Block) { + _, ok := <-blockChannel + if ok { + t.Fail() + } +} + +func assertBlocksEqual(t *testing.T, a, b blocks.Block) { + if !bytes.Equal(a.RawData(), b.RawData()) { + t.Fatal("blocks aren't equal") + } + if a.Cid() != b.Cid() { + t.Fatal("block keys aren't equal") + } +} diff --git a/internal/peermanager/peermanager.go b/internal/peermanager/peermanager.go new file mode 100644 index 0000000000000000000000000000000000000000..c770b020f93038f84306136eaa3a570f24bb46bf --- /dev/null +++ b/internal/peermanager/peermanager.go @@ -0,0 +1,246 @@ +package peermanager + +import ( + "context" + "sync" + + logging "gitlab.dms3.io/dms3/go-log" + "gitlab.dms3.io/dms3/go-metrics-interface" + + cid "gitlab.dms3.io/dms3/go-cid" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +var log = logging.Logger("bs:peermgr") + +// PeerQueue provides a queue of messages to be sent for a single peer. +type PeerQueue interface { + AddBroadcastWantHaves([]cid.Cid) + AddWants([]cid.Cid, []cid.Cid) + AddCancels([]cid.Cid) + ResponseReceived(ks []cid.Cid) + Startup() + Shutdown() +} + +type Session interface { + ID() uint64 + SignalAvailability(peer.ID, bool) +} + +// PeerQueueFactory provides a function that will create a PeerQueue.
+type PeerQueueFactory func(ctx context.Context, p peer.ID) PeerQueue + +// PeerManager manages a pool of peers and sends messages to peers in the pool. +type PeerManager struct { + // sync access to peerQueues and peerWantManager + pqLk sync.RWMutex + // peerQueues -- interact through internal utility functions get/set/remove/iterate + peerQueues map[peer.ID]PeerQueue + pwm *peerWantManager + + createPeerQueue PeerQueueFactory + ctx context.Context + + psLk sync.RWMutex + sessions map[uint64]Session + peerSessions map[peer.ID]map[uint64]struct{} + + self peer.ID +} + +// New creates a new PeerManager, given a context and a peerQueueFactory. +func New(ctx context.Context, createPeerQueue PeerQueueFactory, self peer.ID) *PeerManager { + wantGauge := metrics.NewCtx(ctx, "wantlist_total", "Number of items in wantlist.").Gauge() + wantBlockGauge := metrics.NewCtx(ctx, "want_blocks_total", "Number of want-blocks in wantlist.").Gauge() + return &PeerManager{ + peerQueues: make(map[peer.ID]PeerQueue), + pwm: newPeerWantManager(wantGauge, wantBlockGauge), + createPeerQueue: createPeerQueue, + ctx: ctx, + self: self, + + sessions: make(map[uint64]Session), + peerSessions: make(map[peer.ID]map[uint64]struct{}), + } +} + +func (pm *PeerManager) AvailablePeers() []peer.ID { + // TODO: Rate-limit peers + return pm.ConnectedPeers() +} + +// ConnectedPeers returns a list of peers this PeerManager is managing. +func (pm *PeerManager) ConnectedPeers() []peer.ID { + pm.pqLk.RLock() + defer pm.pqLk.RUnlock() + + peers := make([]peer.ID, 0, len(pm.peerQueues)) + for p := range pm.peerQueues { + peers = append(peers, p) + } + return peers +} + +// Connected is called to add a new peer to the pool, and send it an initial set +// of wants. +func (pm *PeerManager) Connected(p peer.ID) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + + pq := pm.getOrCreate(p) + + // Inform the peer want manager that there's a new peer + pm.pwm.addPeer(pq, p) + + // Inform the sessions that the peer has connected + pm.signalAvailability(p, true) +} + +// Disconnected is called to remove a peer from the pool. +func (pm *PeerManager) Disconnected(p peer.ID) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + + pq, ok := pm.peerQueues[p] + + if !ok { + return + } + + // Inform the sessions that the peer has disconnected + pm.signalAvailability(p, false) + + // Clean up the peer + delete(pm.peerQueues, p) + pq.Shutdown() + pm.pwm.removePeer(p) +} + +// ResponseReceived is called when a message is received from the network. +// ks is the set of blocks, HAVEs and DONT_HAVEs in the message +// Note that this is just used to calculate latency. +func (pm *PeerManager) ResponseReceived(p peer.ID, ks []cid.Cid) { + pm.pqLk.Lock() + pq, ok := pm.peerQueues[p] + pm.pqLk.Unlock() + + if ok { + pq.ResponseReceived(ks) + } +} + +// BroadcastWantHaves broadcasts want-haves to all peers (used by the session +// to discover seeds). +// For each peer it filters out want-haves that have previously been sent to +// the peer. +func (pm *PeerManager) BroadcastWantHaves(ctx context.Context, wantHaves []cid.Cid) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + + pm.pwm.broadcastWantHaves(wantHaves) +} + +// SendWants sends the given want-blocks and want-haves to the given peer. +// It filters out wants that have previously been sent to the peer. 
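+// Wants are only sent if the peer is currently connected (i.e. has a peer queue).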
+func (pm *PeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + + if _, ok := pm.peerQueues[p]; ok { + pm.pwm.sendWants(p, wantBlocks, wantHaves) + } +} + +// SendCancels sends cancels for the given keys to all peers who had previously +// received a want for those keys. +func (pm *PeerManager) SendCancels(ctx context.Context, cancelKs []cid.Cid) { + pm.pqLk.Lock() + defer pm.pqLk.Unlock() + + // Send a CANCEL to each peer that has been sent a want-block or want-have + pm.pwm.sendCancels(cancelKs) +} + +// CurrentWants returns the list of pending wants (both want-haves and want-blocks). +func (pm *PeerManager) CurrentWants() []cid.Cid { + pm.pqLk.RLock() + defer pm.pqLk.RUnlock() + + return pm.pwm.getWants() +} + +// CurrentWantBlocks returns the list of pending want-blocks +func (pm *PeerManager) CurrentWantBlocks() []cid.Cid { + pm.pqLk.RLock() + defer pm.pqLk.RUnlock() + + return pm.pwm.getWantBlocks() +} + +// CurrentWantHaves returns the list of pending want-haves +func (pm *PeerManager) CurrentWantHaves() []cid.Cid { + pm.pqLk.RLock() + defer pm.pqLk.RUnlock() + + return pm.pwm.getWantHaves() +} + +func (pm *PeerManager) getOrCreate(p peer.ID) PeerQueue { + pq, ok := pm.peerQueues[p] + if !ok { + pq = pm.createPeerQueue(pm.ctx, p) + pq.Startup() + pm.peerQueues[p] = pq + } + return pq +} + +// RegisterSession tells the PeerManager that the given session is interested +// in events about the given peer. +func (pm *PeerManager) RegisterSession(p peer.ID, s Session) { + pm.psLk.Lock() + defer pm.psLk.Unlock() + + if _, ok := pm.sessions[s.ID()]; !ok { + pm.sessions[s.ID()] = s + } + + if _, ok := pm.peerSessions[p]; !ok { + pm.peerSessions[p] = make(map[uint64]struct{}) + } + pm.peerSessions[p][s.ID()] = struct{}{} +} + +// UnregisterSession tells the PeerManager that the given session is no longer +// interested in PeerManager events. +func (pm *PeerManager) UnregisterSession(ses uint64) { + pm.psLk.Lock() + defer pm.psLk.Unlock() + + for p := range pm.peerSessions { + delete(pm.peerSessions[p], ses) + if len(pm.peerSessions[p]) == 0 { + delete(pm.peerSessions, p) + } + } + + delete(pm.sessions, ses) +} + +// signalAvailability is called when a peer's connectivity changes. +// It informs interested sessions. 
+func (pm *PeerManager) signalAvailability(p peer.ID, isConnected bool) { + pm.psLk.Lock() + defer pm.psLk.Unlock() + + sesIds, ok := pm.peerSessions[p] + if !ok { + return + } + for sesId := range sesIds { + if s, ok := pm.sessions[sesId]; ok { + s.SignalAvailability(p, isConnected) + } + } +} diff --git a/internal/peermanager/peermanager_test.go b/internal/peermanager/peermanager_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0e0ae77483b8aebc2026c3f04189fc86734a21a7 --- /dev/null +++ b/internal/peermanager/peermanager_test.go @@ -0,0 +1,379 @@ +package peermanager + +import ( + "context" + "math/rand" + "testing" + "time" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + cid "gitlab.dms3.io/dms3/go-cid" + + "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +type msg struct { + p peer.ID + wantBlocks []cid.Cid + wantHaves []cid.Cid + cancels []cid.Cid +} + +type mockPeerQueue struct { + p peer.ID + msgs chan msg +} + +func (fp *mockPeerQueue) Startup() {} +func (fp *mockPeerQueue) Shutdown() {} + +func (fp *mockPeerQueue) AddBroadcastWantHaves(whs []cid.Cid) { + fp.msgs <- msg{fp.p, nil, whs, nil} +} +func (fp *mockPeerQueue) AddWants(wbs []cid.Cid, whs []cid.Cid) { + fp.msgs <- msg{fp.p, wbs, whs, nil} +} +func (fp *mockPeerQueue) AddCancels(cs []cid.Cid) { + fp.msgs <- msg{fp.p, nil, nil, cs} +} +func (fp *mockPeerQueue) ResponseReceived(ks []cid.Cid) { +} + +type peerWants struct { + wantHaves []cid.Cid + wantBlocks []cid.Cid + cancels []cid.Cid +} + +func collectMessages(ch chan msg, timeout time.Duration) map[peer.ID]peerWants { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + collected := make(map[peer.ID]peerWants) + for { + select { + case m := <-ch: + pw, ok := collected[m.p] + if !ok { + pw = peerWants{} + } + pw.wantHaves = append(pw.wantHaves, m.wantHaves...) + pw.wantBlocks = append(pw.wantBlocks, m.wantBlocks...) + pw.cancels = append(pw.cancels, m.cancels...) 
+ collected[m.p] = pw + case <-ctx.Done(): + return collected + } + } +} + +func makePeerQueueFactory(msgs chan msg) PeerQueueFactory { + return func(ctx context.Context, p peer.ID) PeerQueue { + return &mockPeerQueue{ + p: p, + msgs: msgs, + } + } +} + +func TestAddingAndRemovingPeers(t *testing.T) { + ctx := context.Background() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + + tp := testutil.GeneratePeers(6) + self, peer1, peer2, peer3, peer4, peer5 := tp[0], tp[1], tp[2], tp[3], tp[4], tp[5] + peerManager := New(ctx, peerQueueFactory, self) + + peerManager.Connected(peer1) + peerManager.Connected(peer2) + peerManager.Connected(peer3) + + connectedPeers := peerManager.ConnectedPeers() + + if !testutil.ContainsPeer(connectedPeers, peer1) || + !testutil.ContainsPeer(connectedPeers, peer2) || + !testutil.ContainsPeer(connectedPeers, peer3) { + t.Fatal("Peers not connected that should be connected") + } + + if testutil.ContainsPeer(connectedPeers, peer4) || + testutil.ContainsPeer(connectedPeers, peer5) { + t.Fatal("Peers connected that shouldn't be connected") + } + + // disconnect a peer + peerManager.Disconnected(peer1) + connectedPeers = peerManager.ConnectedPeers() + + if testutil.ContainsPeer(connectedPeers, peer1) { + t.Fatal("Peer should have been disconnected but was not") + } + + // reconnect peer + peerManager.Connected(peer1) + connectedPeers = peerManager.ConnectedPeers() + + if !testutil.ContainsPeer(connectedPeers, peer1) { + t.Fatal("Peer should have been connected but was not") + } +} + +func TestBroadcastOnConnect(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + tp := testutil.GeneratePeers(2) + self, peer1 := tp[0], tp[1] + peerManager := New(ctx, peerQueueFactory, self) + + cids := testutil.GenerateCids(2) + peerManager.BroadcastWantHaves(ctx, cids) + + // Connect with two broadcast wants for first peer + peerManager.Connected(peer1) + collected := collectMessages(msgs, 2*time.Millisecond) + + if len(collected[peer1].wantHaves) != 2 { + t.Fatal("Expected want-haves to be sent to newly connected peer") + } +} + +func TestBroadcastWantHaves(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + tp := testutil.GeneratePeers(3) + self, peer1, peer2 := tp[0], tp[1], tp[2] + peerManager := New(ctx, peerQueueFactory, self) + + cids := testutil.GenerateCids(3) + + // Broadcast the first two. + peerManager.BroadcastWantHaves(ctx, cids[:2]) + + // First peer should get them. 
+ peerManager.Connected(peer1) + collected := collectMessages(msgs, 2*time.Millisecond) + + if len(collected[peer1].wantHaves) != 2 { + t.Fatal("Expected want-haves to be sent to newly connected peer") + } + + // Connect to second peer + peerManager.Connected(peer2) + + // Send a broadcast to all peers, including cid that was already sent to + // first peer + peerManager.BroadcastWantHaves(ctx, []cid.Cid{cids[0], cids[2]}) + collected = collectMessages(msgs, 2*time.Millisecond) + + // One of the want-haves was already sent to peer1 + if len(collected[peer1].wantHaves) != 1 { + t.Fatalf("Expected 1 want-haves to be sent to first peer, got %d", + len(collected[peer1].wantHaves)) + } + if len(collected[peer2].wantHaves) != 3 { + t.Fatalf("Expected 3 want-haves to be sent to second peer, got %d", + len(collected[peer2].wantHaves)) + } +} + +func TestSendWants(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + tp := testutil.GeneratePeers(2) + self, peer1 := tp[0], tp[1] + peerManager := New(ctx, peerQueueFactory, self) + cids := testutil.GenerateCids(4) + + peerManager.Connected(peer1) + peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0]}, []cid.Cid{cids[2]}) + collected := collectMessages(msgs, 2*time.Millisecond) + + if len(collected[peer1].wantHaves) != 1 { + t.Fatal("Expected want-have to be sent to peer") + } + if len(collected[peer1].wantBlocks) != 1 { + t.Fatal("Expected want-block to be sent to peer") + } + + peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0], cids[1]}, []cid.Cid{cids[2], cids[3]}) + collected = collectMessages(msgs, 2*time.Millisecond) + + // First want-have and want-block should be filtered (because they were + // already sent) + if len(collected[peer1].wantHaves) != 1 { + t.Fatal("Expected want-have to be sent to peer") + } + if len(collected[peer1].wantBlocks) != 1 { + t.Fatal("Expected want-block to be sent to peer") + } +} + +func TestSendCancels(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + tp := testutil.GeneratePeers(3) + self, peer1, peer2 := tp[0], tp[1], tp[2] + peerManager := New(ctx, peerQueueFactory, self) + cids := testutil.GenerateCids(4) + + // Connect to peer1 and peer2 + peerManager.Connected(peer1) + peerManager.Connected(peer2) + + // Send 2 want-blocks and 1 want-have to peer1 + peerManager.SendWants(ctx, peer1, []cid.Cid{cids[0], cids[1]}, []cid.Cid{cids[2]}) + + // Clear messages + collectMessages(msgs, 2*time.Millisecond) + + // Send cancels for 1 want-block and 1 want-have + peerManager.SendCancels(ctx, []cid.Cid{cids[0], cids[2]}) + collected := collectMessages(msgs, 2*time.Millisecond) + + if _, ok := collected[peer2]; ok { + t.Fatal("Expected no cancels to be sent to peer that was not sent messages") + } + if len(collected[peer1].cancels) != 2 { + t.Fatal("Expected cancel to be sent for want-block and want-have sent to peer") + } + + // Send cancels for all cids + peerManager.SendCancels(ctx, cids) + collected = collectMessages(msgs, 2*time.Millisecond) + + if _, ok := collected[peer2]; ok { + t.Fatal("Expected no cancels to be sent to peer that was not sent messages") + } + if len(collected[peer1].cancels) != 1 { + t.Fatal("Expected cancel to be sent for remaining want-block") + } +} + +func (s *sess) ID() uint64 { + return s.id +} +func (s *sess) SignalAvailability(p 
peer.ID, isAvailable bool) { + s.available[p] = isAvailable +} + +type sess struct { + id uint64 + available map[peer.ID]bool +} + +func newSess(id uint64) *sess { + return &sess{id, make(map[peer.ID]bool)} +} + +func TestSessionRegistration(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + msgs := make(chan msg, 16) + peerQueueFactory := makePeerQueueFactory(msgs) + + tp := testutil.GeneratePeers(3) + self, p1, p2 := tp[0], tp[1], tp[2] + peerManager := New(ctx, peerQueueFactory, self) + + id := uint64(1) + s := newSess(id) + peerManager.RegisterSession(p1, s) + if s.available[p1] { + t.Fatal("Expected peer not be available till connected") + } + peerManager.RegisterSession(p2, s) + if s.available[p2] { + t.Fatal("Expected peer not be available till connected") + } + + peerManager.Connected(p1) + if !s.available[p1] { + t.Fatal("Expected signal callback") + } + peerManager.Connected(p2) + if !s.available[p2] { + t.Fatal("Expected signal callback") + } + + peerManager.Disconnected(p1) + if s.available[p1] { + t.Fatal("Expected signal callback") + } + if !s.available[p2] { + t.Fatal("Expected signal callback only for disconnected peer") + } + + peerManager.UnregisterSession(id) + + peerManager.Connected(p1) + if s.available[p1] { + t.Fatal("Expected no signal callback (session unregistered)") + } +} + +type benchPeerQueue struct { +} + +func (*benchPeerQueue) Startup() {} +func (*benchPeerQueue) Shutdown() {} + +func (*benchPeerQueue) AddBroadcastWantHaves(whs []cid.Cid) {} +func (*benchPeerQueue) AddWants(wbs []cid.Cid, whs []cid.Cid) {} +func (*benchPeerQueue) AddCancels(cs []cid.Cid) {} +func (*benchPeerQueue) ResponseReceived(ks []cid.Cid) {} + +// Simplistic benchmark to allow us to stress test +func BenchmarkPeerManager(b *testing.B) { + b.StopTimer() + + ctx := context.Background() + + peerQueueFactory := func(ctx context.Context, p peer.ID) PeerQueue { + return &benchPeerQueue{} + } + + self := testutil.GeneratePeers(1)[0] + peers := testutil.GeneratePeers(500) + peerManager := New(ctx, peerQueueFactory, self) + + // Create a bunch of connections + connected := 0 + for i := 0; i < len(peers); i++ { + peerManager.Connected(peers[i]) + connected++ + } + + var wanted []cid.Cid + + b.StartTimer() + for n := 0; n < b.N; n++ { + // Pick a random peer + i := rand.Intn(connected) + + // Alternately add either a few wants or many broadcast wants + r := rand.Intn(8) + if r == 0 { + wants := testutil.GenerateCids(10) + peerManager.SendWants(ctx, peers[i], wants[:2], wants[2:]) + wanted = append(wanted, wants...) + } else if r == 1 { + wants := testutil.GenerateCids(30) + peerManager.BroadcastWantHaves(ctx, wants) + wanted = append(wanted, wants...) + } else { + limit := len(wanted) / 10 + cancel := wanted[:limit] + wanted = wanted[limit:] + peerManager.SendCancels(ctx, cancel) + } + } +} diff --git a/internal/peermanager/peerwantmanager.go b/internal/peermanager/peerwantmanager.go new file mode 100644 index 0000000000000000000000000000000000000000..35ee766fdddf298b7642be6c897dda9713d31659 --- /dev/null +++ b/internal/peermanager/peerwantmanager.go @@ -0,0 +1,464 @@ +package peermanager + +import ( + "bytes" + "fmt" + + cid "gitlab.dms3.io/dms3/go-cid" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +// Gauge can be used to keep track of a metric that increases and decreases +// incrementally. 
It is used by the peerWantManager to track the number of +// want-blocks that are active (i.e. sent but no response received) +type Gauge interface { + Inc() + Dec() +} + +// peerWantManager keeps track of which want-haves and want-blocks have been +// sent to each peer, so that the PeerManager doesn't send duplicates. +type peerWantManager struct { + // peerWants maps peers to outstanding wants. + // A peer's wants is the _union_ of the broadcast wants and the wants in + // this list. + peerWants map[peer.ID]*peerWant + + // Reverse index of all wants in peerWants. + wantPeers map[cid.Cid]map[peer.ID]struct{} + + // broadcastWants tracks all the current broadcast wants. + broadcastWants *cid.Set + + // Keeps track of the number of active want-haves & want-blocks + wantGauge Gauge + // Keeps track of the number of active want-blocks + wantBlockGauge Gauge +} + +type peerWant struct { + wantBlocks *cid.Set + wantHaves *cid.Set + peerQueue PeerQueue +} + +// newPeerWantManager creates a new peerWantManager with gauges that keep track +// of the number of active wants and active want-blocks (i.e. sent but no +// response received) +func newPeerWantManager(wantGauge Gauge, wantBlockGauge Gauge) *peerWantManager { + return &peerWantManager{ + broadcastWants: cid.NewSet(), + peerWants: make(map[peer.ID]*peerWant), + wantPeers: make(map[cid.Cid]map[peer.ID]struct{}), + wantGauge: wantGauge, + wantBlockGauge: wantBlockGauge, + } +} + +// addPeer adds a peer whose wants we need to keep track of. It sends the +// current list of broadcast wants to the peer. +func (pwm *peerWantManager) addPeer(peerQueue PeerQueue, p peer.ID) { + if _, ok := pwm.peerWants[p]; ok { + return + } + + pwm.peerWants[p] = &peerWant{ + wantBlocks: cid.NewSet(), + wantHaves: cid.NewSet(), + peerQueue: peerQueue, + } + + // Broadcast any live want-haves to the newly connected peer + if pwm.broadcastWants.Len() > 0 { + wants := pwm.broadcastWants.Keys() + peerQueue.AddBroadcastWantHaves(wants) + } +} + +// removePeer removes a peer and its associated wants from tracking +func (pwm *peerWantManager) removePeer(p peer.ID) { + pws, ok := pwm.peerWants[p] + if !ok { + return + } + + // Clean up want-blocks + _ = pws.wantBlocks.ForEach(func(c cid.Cid) error { + // Clean up want-blocks from the reverse index + pwm.reverseIndexRemove(c, p) + + // Decrement the gauges if no other peer has a pending want-block / want for this key + peerCounts := pwm.wantPeerCounts(c) + if peerCounts.wantBlock == 0 { + pwm.wantBlockGauge.Dec() + } + if !peerCounts.wanted() { + pwm.wantGauge.Dec() + } + + return nil + }) + + // Clean up want-haves + _ = pws.wantHaves.ForEach(func(c cid.Cid) error { + // Clean up want-haves from the reverse index + pwm.reverseIndexRemove(c, p) + + // Decrement the total wants gauge if the key is no longer wanted by any peer or broadcast + peerCounts := pwm.wantPeerCounts(c) + if !peerCounts.wanted() { + pwm.wantGauge.Dec() + } + return nil + }) + + delete(pwm.peerWants, p) +} + +// broadcastWantHaves sends want-haves to any peers that have not yet been sent them. +func (pwm *peerWantManager) broadcastWantHaves(wantHaves []cid.Cid) { + unsent := make([]cid.Cid, 0, len(wantHaves)) + for _, c := range wantHaves { + if pwm.broadcastWants.Has(c) { + // Already a broadcast want, skip it.
+ continue + } + pwm.broadcastWants.Add(c) + unsent = append(unsent, c) + + // If no peer has a pending want for the key + if _, ok := pwm.wantPeers[c]; !ok { + // Increment the total wants gauge + pwm.wantGauge.Inc() + } + } + + if len(unsent) == 0 { + return + } + + // Allocate a single buffer to filter broadcast wants for each peer + bcstWantsBuffer := make([]cid.Cid, 0, len(unsent)) + + // Send broadcast wants to each peer + for _, pws := range pwm.peerWants { + peerUnsent := bcstWantsBuffer[:0] + for _, c := range unsent { + // If we've already sent a want to this peer, skip them. + if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { + peerUnsent = append(peerUnsent, c) + } + } + + if len(peerUnsent) > 0 { + pws.peerQueue.AddBroadcastWantHaves(peerUnsent) + } + } +} + +// sendWants only sends the peer the want-blocks and want-haves that have not +// already been sent to it. +func (pwm *peerWantManager) sendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { + fltWantBlks := make([]cid.Cid, 0, len(wantBlocks)) + fltWantHvs := make([]cid.Cid, 0, len(wantHaves)) + + // Get the existing want-blocks and want-haves for the peer + pws, ok := pwm.peerWants[p] + if !ok { + // In practice this should never happen + log.Errorf("sendWants() called with peer %s but peer not found in peerWantManager", string(p)) + return + } + + // Iterate over the requested want-blocks + for _, c := range wantBlocks { + // If the want-block hasn't been sent to the peer + if pws.wantBlocks.Has(c) { + continue + } + + // Increment the want gauges + peerCounts := pwm.wantPeerCounts(c) + if peerCounts.wantBlock == 0 { + pwm.wantBlockGauge.Inc() + } + if !peerCounts.wanted() { + pwm.wantGauge.Inc() + } + + // Make sure the CID is no longer recorded as a want-have + pws.wantHaves.Remove(c) + + // Record that the CID was sent as a want-block + pws.wantBlocks.Add(c) + + // Add the CID to the results + fltWantBlks = append(fltWantBlks, c) + + // Update the reverse index + pwm.reverseIndexAdd(c, p) + } + + // Iterate over the requested want-haves + for _, c := range wantHaves { + // If we've already broadcasted this want, don't bother with a + // want-have. 
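+ // (the peer already has a broadcast want-have or another want for it)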
+ if pwm.broadcastWants.Has(c) { + continue + } + + // If the CID has not been sent as a want-block or want-have + if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { + // Increment the total wants gauge + peerCounts := pwm.wantPeerCounts(c) + if !peerCounts.wanted() { + pwm.wantGauge.Inc() + } + + // Record that the CID was sent as a want-have + pws.wantHaves.Add(c) + + // Add the CID to the results + fltWantHvs = append(fltWantHvs, c) + + // Update the reverse index + pwm.reverseIndexAdd(c, p) + } + } + + // Send the want-blocks and want-haves to the peer + pws.peerQueue.AddWants(fltWantBlks, fltWantHvs) +} + +// sendCancels sends a cancel to each peer to which a corresponding want was +// sent +func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { + if len(cancelKs) == 0 { + return + } + + // Record how many peers have a pending want-block and want-have for each + // key to be cancelled + peerCounts := make(map[cid.Cid]wantPeerCnts, len(cancelKs)) + for _, c := range cancelKs { + peerCounts[c] = pwm.wantPeerCounts(c) + } + + // Create a buffer to use for filtering cancels per peer, with the + // broadcast wants at the front of the buffer (broadcast wants are sent to + // all peers) + broadcastCancels := make([]cid.Cid, 0, len(cancelKs)) + for _, c := range cancelKs { + if pwm.broadcastWants.Has(c) { + broadcastCancels = append(broadcastCancels, c) + } + } + + // Send cancels to a particular peer + send := func(p peer.ID, pws *peerWant) { + // Start from the broadcast cancels + toCancel := broadcastCancels + + // For each key to be cancelled + for _, c := range cancelKs { + // Check if a want was sent for the key + if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { + continue + } + + // Unconditionally remove from the want lists. + pws.wantBlocks.Remove(c) + pws.wantHaves.Remove(c) + + // If it's a broadcast want, we've already added it to + // the peer cancels. + if !pwm.broadcastWants.Has(c) { + toCancel = append(toCancel, c) + } + } + + // Send cancels to the peer + if len(toCancel) > 0 { + pws.peerQueue.AddCancels(toCancel) + } + } + + if len(broadcastCancels) > 0 { + // If a broadcast want is being cancelled, send the cancel to all + // peers + for p, pws := range pwm.peerWants { + send(p, pws) + } + } else { + // Only send cancels to peers that received a corresponding want + cancelPeers := make(map[peer.ID]struct{}, len(pwm.wantPeers[cancelKs[0]])) + for _, c := range cancelKs { + for p := range pwm.wantPeers[c] { + cancelPeers[p] = struct{}{} + } + } + for p := range cancelPeers { + pws, ok := pwm.peerWants[p] + if !ok { + // Should never happen but check just in case + log.Errorf("sendCancels - peerWantManager index missing peer %s", p) + continue + } + + send(p, pws) + } + } + + // Decrement the wants gauges + for _, c := range cancelKs { + peerCnts := peerCounts[c] + + // If there were any peers that had a pending want-block for the key + if peerCnts.wantBlock > 0 { + // Decrement the want-block gauge + pwm.wantBlockGauge.Dec() + } + + // If there was a peer that had a pending want or it was a broadcast want + if peerCnts.wanted() { + // Decrement the total wants gauge + pwm.wantGauge.Dec() + } + } + + // Remove cancelled broadcast wants + for _, c := range broadcastCancels { + pwm.broadcastWants.Remove(c) + } + + // Batch-remove the reverse-index. There's no need to clear this index + // peer-by-peer. 
+ for _, c := range cancelKs { + delete(pwm.wantPeers, c) + } +} + +// wantPeerCnts stores the number of peers that have pending wants for a CID +type wantPeerCnts struct { + // number of peers that have a pending want-block for the CID + wantBlock int + // number of peers that have a pending want-have for the CID + wantHave int + // whether the CID is a broadcast want + isBroadcast bool +} + +// wanted returns true if any peer wants the CID or it's a broadcast want +func (pwm *wantPeerCnts) wanted() bool { + return pwm.wantBlock > 0 || pwm.wantHave > 0 || pwm.isBroadcast +} + +// wantPeerCounts counts how many peers have a pending want-block and want-have +// for the given CID +func (pwm *peerWantManager) wantPeerCounts(c cid.Cid) wantPeerCnts { + blockCount := 0 + haveCount := 0 + for p := range pwm.wantPeers[c] { + pws, ok := pwm.peerWants[p] + if !ok { + log.Errorf("reverse index has extra peer %s for key %s in peerWantManager", string(p), c) + continue + } + + if pws.wantBlocks.Has(c) { + blockCount++ + } else if pws.wantHaves.Has(c) { + haveCount++ + } + } + + return wantPeerCnts{blockCount, haveCount, pwm.broadcastWants.Has(c)} +} + +// Add the peer to the list of peers that have been sent a want with the cid +func (pwm *peerWantManager) reverseIndexAdd(c cid.Cid, p peer.ID) bool { + peers, ok := pwm.wantPeers[c] + if !ok { + peers = make(map[peer.ID]struct{}, 10) + pwm.wantPeers[c] = peers + } + peers[p] = struct{}{} + return !ok +} + +// Remove the peer from the list of peers that have been sent a want with the cid +func (pwm *peerWantManager) reverseIndexRemove(c cid.Cid, p peer.ID) { + if peers, ok := pwm.wantPeers[c]; ok { + delete(peers, p) + if len(peers) == 0 { + delete(pwm.wantPeers, c) + } + } +} + +// getWantBlocks returns the set of all want-blocks sent to any peer +func (pwm *peerWantManager) getWantBlocks() []cid.Cid { + res := cid.NewSet() + + // Iterate over all known peers + for _, pws := range pwm.peerWants { + // Iterate over all want-blocks + _ = pws.wantBlocks.ForEach(func(c cid.Cid) error { + // Add the CID to the results + res.Add(c) + return nil + }) + } + + return res.Keys() +} + +// getWantHaves returns the set of all want-haves sent to any peer, plus all +// broadcast want-haves +func (pwm *peerWantManager) getWantHaves() []cid.Cid { + res := cid.NewSet() + + // Iterate over all peers with active wants. + for _, pws := range pwm.peerWants { + // Iterate over all want-haves + _ = pws.wantHaves.ForEach(func(c cid.Cid) error { + // Add the CID to the results + res.Add(c) + return nil + }) + } + _ = pwm.broadcastWants.ForEach(func(c cid.Cid) error { + res.Add(c) + return nil + }) + + return res.Keys() +} + +// getWants returns the set of all wants (both want-blocks and want-haves). +func (pwm *peerWantManager) getWants() []cid.Cid { + res := pwm.broadcastWants.Keys() + + // Iterate over all targeted wants, removing ones that are also in the + // broadcast list.
+ for c := range pwm.wantPeers { + if pwm.broadcastWants.Has(c) { + continue + } + res = append(res, c) + } + + return res +} + +func (pwm *peerWantManager) String() string { + var b bytes.Buffer + for p, ws := range pwm.peerWants { + b.WriteString(fmt.Sprintf("Peer %s: %d want-have / %d want-block:\n", p, ws.wantHaves.Len(), ws.wantBlocks.Len())) + for _, c := range ws.wantHaves.Keys() { + b.WriteString(fmt.Sprintf(" want-have %s\n", c)) + } + for _, c := range ws.wantBlocks.Keys() { + b.WriteString(fmt.Sprintf(" want-block %s\n", c)) + } + } + return b.String() +} diff --git a/internal/peermanager/peerwantmanager_test.go b/internal/peermanager/peerwantmanager_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5e46129354b582235e0a9487f5514291923ce292 --- /dev/null +++ b/internal/peermanager/peerwantmanager_test.go @@ -0,0 +1,516 @@ +package peermanager + +import ( + "testing" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + cid "gitlab.dms3.io/dms3/go-cid" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +type gauge struct { + count int +} + +func (g *gauge) Inc() { + g.count++ +} +func (g *gauge) Dec() { + g.count-- +} + +type mockPQ struct { + bcst []cid.Cid + wbs []cid.Cid + whs []cid.Cid + cancels []cid.Cid +} + +func (mpq *mockPQ) clear() { + mpq.bcst = nil + mpq.wbs = nil + mpq.whs = nil + mpq.cancels = nil +} + +func (mpq *mockPQ) Startup() {} +func (mpq *mockPQ) Shutdown() {} + +func (mpq *mockPQ) AddBroadcastWantHaves(whs []cid.Cid) { + mpq.bcst = append(mpq.bcst, whs...) +} +func (mpq *mockPQ) AddWants(wbs []cid.Cid, whs []cid.Cid) { + mpq.wbs = append(mpq.wbs, wbs...) + mpq.whs = append(mpq.whs, whs...) +} +func (mpq *mockPQ) AddCancels(cs []cid.Cid) { + mpq.cancels = append(mpq.cancels, cs...) +} +func (mpq *mockPQ) ResponseReceived(ks []cid.Cid) { +} + +func clearSent(pqs map[peer.ID]PeerQueue) { + for _, pqi := range pqs { + pqi.(*mockPQ).clear() + } +} + +func TestEmpty(t *testing.T) { + pwm := newPeerWantManager(&gauge{}, &gauge{}) + + if len(pwm.getWantBlocks()) > 0 { + t.Fatal("Expected GetWantBlocks() to have length 0") + } + if len(pwm.getWantHaves()) > 0 { + t.Fatal("Expected GetWantHaves() to have length 0") + } +} + +func TestPWMBroadcastWantHaves(t *testing.T) { + pwm := newPeerWantManager(&gauge{}, &gauge{}) + + peers := testutil.GeneratePeers(3) + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + cids3 := testutil.GenerateCids(2) + + peerQueues := make(map[peer.ID]PeerQueue) + for _, p := range peers[:2] { + pq := &mockPQ{} + peerQueues[p] = pq + pwm.addPeer(pq, p) + if len(pq.bcst) > 0 { + t.Errorf("expected no broadcast wants") + } + } + + // Broadcast 2 cids to 2 peers + pwm.broadcastWantHaves(cids) + for _, pqi := range peerQueues { + pq := pqi.(*mockPQ) + if len(pq.bcst) != 2 { + t.Fatal("Expected 2 want-haves") + } + if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids) { + t.Fatal("Expected all cids to be broadcast") + } + } + + // Broadcasting same cids should have no effect + clearSent(peerQueues) + pwm.broadcastWantHaves(cids) + for _, pqi := range peerQueues { + pq := pqi.(*mockPQ) + if len(pq.bcst) != 0 { + t.Fatal("Expected 0 want-haves") + } + } + + // Broadcast 2 other cids + clearSent(peerQueues) + pwm.broadcastWantHaves(cids2) + for _, pqi := range peerQueues { + pq := pqi.(*mockPQ) + if len(pq.bcst) != 2 { + t.Fatal("Expected 2 want-haves") + } + if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids2) { + t.Fatal("Expected all new cids to be broadcast") + } + } + + // Broadcast mix of old and new cids 
+ clearSent(peerQueues) + pwm.broadcastWantHaves(append(cids, cids3...)) + for _, pqi := range peerQueues { + pq := pqi.(*mockPQ) + if len(pq.bcst) != 2 { + t.Fatal("Expected 2 want-haves") + } + // Only new cids should be broadcast + if !testutil.MatchKeysIgnoreOrder(pq.bcst, cids3) { + t.Fatal("Expected all new cids to be broadcast") + } + } + + // Sending want-block for a cid should prevent broadcast to that peer + clearSent(peerQueues) + cids4 := testutil.GenerateCids(4) + wantBlocks := []cid.Cid{cids4[0], cids4[2]} + p0 := peers[0] + p1 := peers[1] + pwm.sendWants(p0, wantBlocks, []cid.Cid{}) + + pwm.broadcastWantHaves(cids4) + pq0 := peerQueues[p0].(*mockPQ) + if len(pq0.bcst) != 2 { // only broadcast 2 / 4 want-haves + t.Fatal("Expected 2 want-haves") + } + if !testutil.MatchKeysIgnoreOrder(pq0.bcst, []cid.Cid{cids4[1], cids4[3]}) { + t.Fatalf("Expected unsent cids to be broadcast") + } + pq1 := peerQueues[p1].(*mockPQ) + if len(pq1.bcst) != 4 { // broadcast all 4 want-haves + t.Fatal("Expected 4 want-haves") + } + if !testutil.MatchKeysIgnoreOrder(pq1.bcst, cids4) { + t.Fatal("Expected all cids to be broadcast") + } + + allCids := cids + allCids = append(allCids, cids2...) + allCids = append(allCids, cids3...) + allCids = append(allCids, cids4...) + + // Add another peer + peer2 := peers[2] + pq2 := &mockPQ{} + peerQueues[peer2] = pq2 + pwm.addPeer(pq2, peer2) + if !testutil.MatchKeysIgnoreOrder(pq2.bcst, allCids) { + t.Fatalf("Expected all cids to be broadcast.") + } + + clearSent(peerQueues) + pwm.broadcastWantHaves(allCids) + if len(pq2.bcst) != 0 { + t.Errorf("did not expect to have CIDs to broadcast") + } +} + +func TestPWMSendWants(t *testing.T) { + pwm := newPeerWantManager(&gauge{}, &gauge{}) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + + peerQueues := make(map[peer.ID]PeerQueue) + for _, p := range peers[:2] { + pq := &mockPQ{} + peerQueues[p] = pq + pwm.addPeer(pq, p) + } + pq0 := peerQueues[p0].(*mockPQ) + pq1 := peerQueues[p1].(*mockPQ) + + // Send 2 want-blocks and 2 want-haves to p0 + clearSent(peerQueues) + pwm.sendWants(p0, cids, cids2) + if !testutil.MatchKeysIgnoreOrder(pq0.wbs, cids) { + t.Fatal("Expected 2 want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids2) { + t.Fatal("Expected 2 want-haves") + } + + // Send to p0 + // - 1 old want-block and 2 new want-blocks + // - 1 old want-have and 2 new want-haves + clearSent(peerQueues) + cids3 := testutil.GenerateCids(2) + cids4 := testutil.GenerateCids(2) + pwm.sendWants(p0, append(cids3, cids[0]), append(cids4, cids2[0])) + if !testutil.MatchKeysIgnoreOrder(pq0.wbs, cids3) { + t.Fatal("Expected 2 want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids4) { + t.Fatal("Expected 2 want-haves") + } + + // Send to p0 as want-blocks: 1 new want-block, 1 old want-have + clearSent(peerQueues) + cids5 := testutil.GenerateCids(1) + newWantBlockOldWantHave := append(cids5, cids2[0]) + pwm.sendWants(p0, newWantBlockOldWantHave, []cid.Cid{}) + // If a want was sent as a want-have, it should be ok to now send it as a + // want-block + if !testutil.MatchKeysIgnoreOrder(pq0.wbs, newWantBlockOldWantHave) { + t.Fatal("Expected 2 want-blocks") + } + if len(pq0.whs) != 0 { + t.Fatal("Expected 0 want-haves") + } + + // Send to p0 as want-haves: 1 new want-have, 1 old want-block + clearSent(peerQueues) + cids6 := testutil.GenerateCids(1) + newWantHaveOldWantBlock := append(cids6, cids[0]) + pwm.sendWants(p0, 
[]cid.Cid{}, newWantHaveOldWantBlock) + // If a want was previously sent as a want-block, it should not be + // possible to now send it as a want-have + if !testutil.MatchKeysIgnoreOrder(pq0.whs, cids6) { + t.Fatal("Expected 1 want-have") + } + if len(pq0.wbs) != 0 { + t.Fatal("Expected 0 want-blocks") + } + + // Send 2 want-blocks and 2 want-haves to p1 + pwm.sendWants(p1, cids, cids2) + if !testutil.MatchKeysIgnoreOrder(pq1.wbs, cids) { + t.Fatal("Expected 2 want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(pq1.whs, cids2) { + t.Fatal("Expected 2 want-haves") + } +} + +func TestPWMSendCancels(t *testing.T) { + pwm := newPeerWantManager(&gauge{}, &gauge{}) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + wb1 := testutil.GenerateCids(2) + wh1 := testutil.GenerateCids(2) + wb2 := testutil.GenerateCids(2) + wh2 := testutil.GenerateCids(2) + allwb := append(wb1, wb2...) + allwh := append(wh1, wh2...) + + peerQueues := make(map[peer.ID]PeerQueue) + for _, p := range peers[:2] { + pq := &mockPQ{} + peerQueues[p] = pq + pwm.addPeer(pq, p) + } + pq0 := peerQueues[p0].(*mockPQ) + pq1 := peerQueues[p1].(*mockPQ) + + // Send 2 want-blocks and 2 want-haves to p0 + pwm.sendWants(p0, wb1, wh1) + // Send 3 want-blocks and 3 want-haves to p1 + // (1 overlapping want-block / want-have with p0) + pwm.sendWants(p1, append(wb2, wb1[1]), append(wh2, wh1[1])) + + if !testutil.MatchKeysIgnoreOrder(pwm.getWantBlocks(), allwb) { + t.Fatal("Expected 4 cids to be wanted") + } + if !testutil.MatchKeysIgnoreOrder(pwm.getWantHaves(), allwh) { + t.Fatal("Expected 4 cids to be wanted") + } + + // Cancel 1 want-block and 1 want-have that were sent to p0 + clearSent(peerQueues) + pwm.sendCancels([]cid.Cid{wb1[0], wh1[0]}) + // Should cancel the want-block and want-have + if len(pq1.cancels) != 0 { + t.Fatal("Expected no cancels sent to p1") + } + if !testutil.MatchKeysIgnoreOrder(pq0.cancels, []cid.Cid{wb1[0], wh1[0]}) { + t.Fatal("Expected 2 cids to be cancelled") + } + if !testutil.MatchKeysIgnoreOrder(pwm.getWantBlocks(), append(wb2, wb1[1])) { + t.Fatal("Expected 3 want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(pwm.getWantHaves(), append(wh2, wh1[1])) { + t.Fatal("Expected 3 want-haves") + } + + // Cancel everything + clearSent(peerQueues) + allCids := append(allwb, allwh...) + pwm.sendCancels(allCids) + // Should cancel the remaining want-blocks and want-haves for p0 + if !testutil.MatchKeysIgnoreOrder(pq0.cancels, []cid.Cid{wb1[1], wh1[1]}) { + t.Fatal("Expected un-cancelled cids to be cancelled") + } + + // Should cancel the remaining want-blocks and want-haves for p1 + remainingP1 := append(wb2, wh2...) 
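+	// p1 was also sent the overlapping wb1[1] and wh1[1], so expect their cancels too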
+ remainingP1 = append(remainingP1, wb1[1], wh1[1]) + if len(pq1.cancels) != len(remainingP1) { + t.Fatal("mismatch", len(pq1.cancels), len(remainingP1)) + } + if !testutil.MatchKeysIgnoreOrder(pq1.cancels, remainingP1) { + t.Fatal("Expected un-cancelled cids to be cancelled") + } + if len(pwm.getWantBlocks()) != 0 { + t.Fatal("Expected 0 want-blocks") + } + if len(pwm.getWantHaves()) != 0 { + t.Fatal("Expected 0 want-haves") + } +} + +func TestStats(t *testing.T) { + g := &gauge{} + wbg := &gauge{} + pwm := newPeerWantManager(g, wbg) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + + peerQueues := make(map[peer.ID]PeerQueue) + pq := &mockPQ{} + peerQueues[p0] = pq + pwm.addPeer(pq, p0) + + // Send 2 want-blocks and 2 want-haves to p0 + pwm.sendWants(p0, cids, cids2) + + if g.count != 4 { + t.Fatal("Expected 4 wants") + } + if wbg.count != 2 { + t.Fatal("Expected 2 want-blocks") + } + + // Send 1 old want-block and 2 new want-blocks to p0 + cids3 := testutil.GenerateCids(2) + pwm.sendWants(p0, append(cids3, cids[0]), []cid.Cid{}) + + if g.count != 6 { + t.Fatal("Expected 6 wants") + } + if wbg.count != 4 { + t.Fatal("Expected 4 want-blocks") + } + + // Broadcast 1 old want-have and 2 new want-haves + cids4 := testutil.GenerateCids(2) + pwm.broadcastWantHaves(append(cids4, cids2[0])) + if g.count != 8 { + t.Fatal("Expected 8 wants") + } + if wbg.count != 4 { + t.Fatal("Expected 4 want-blocks") + } + + // Add a second peer + pwm.addPeer(pq, p1) + + if g.count != 8 { + t.Fatal("Expected 8 wants") + } + if wbg.count != 4 { + t.Fatal("Expected 4 want-blocks") + } + + // Cancel 1 want-block that was sent to p0 + // and 1 want-block that was not sent + cids5 := testutil.GenerateCids(1) + pwm.sendCancels(append(cids5, cids[0])) + + if g.count != 7 { + t.Fatal("Expected 7 wants") + } + if wbg.count != 3 { + t.Fatal("Expected 3 want-blocks") + } + + // Remove first peer + pwm.removePeer(p0) + + // Should still have 3 broadcast wants + if g.count != 3 { + t.Fatal("Expected 3 wants") + } + if wbg.count != 0 { + t.Fatal("Expected all want-blocks to be removed") + } + + // Remove second peer + pwm.removePeer(p1) + + // Should still have 3 broadcast wants + if g.count != 3 { + t.Fatal("Expected 3 wants") + } + if wbg.count != 0 { + t.Fatal("Expected 0 want-blocks") + } + + // Cancel one remaining broadcast want-have + pwm.sendCancels(cids2[:1]) + if g.count != 2 { + t.Fatal("Expected 2 wants") + } + if wbg.count != 0 { + t.Fatal("Expected 0 want-blocks") + } +} + +func TestStatsOverlappingWantBlockWantHave(t *testing.T) { + g := &gauge{} + wbg := &gauge{} + pwm := newPeerWantManager(g, wbg) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + + pwm.addPeer(&mockPQ{}, p0) + pwm.addPeer(&mockPQ{}, p1) + + // Send 2 want-blocks and 2 want-haves to p0 + pwm.sendWants(p0, cids, cids2) + + // Send opposite: + // 2 want-haves and 2 want-blocks to p1 + pwm.sendWants(p1, cids2, cids) + + if g.count != 4 { + t.Fatal("Expected 4 wants") + } + if wbg.count != 4 { + t.Fatal("Expected 4 want-blocks") + } + + // Cancel 1 of each group of cids + pwm.sendCancels([]cid.Cid{cids[0], cids2[0]}) + + if g.count != 2 { + t.Fatal("Expected 2 wants") + } + if wbg.count != 2 { + t.Fatal("Expected 2 want-blocks") + } +} + +func TestStatsRemovePeerOverlappingWantBlockWantHave(t *testing.T) { + g := &gauge{} + wbg := &gauge{} + pwm := 
newPeerWantManager(g, wbg) + + peers := testutil.GeneratePeers(2) + p0 := peers[0] + p1 := peers[1] + cids := testutil.GenerateCids(2) + cids2 := testutil.GenerateCids(2) + + pwm.addPeer(&mockPQ{}, p0) + pwm.addPeer(&mockPQ{}, p1) + + // Send 2 want-blocks and 2 want-haves to p0 + pwm.sendWants(p0, cids, cids2) + + // Send opposite: + // 2 want-haves and 2 want-blocks to p1 + pwm.sendWants(p1, cids2, cids) + + if g.count != 4 { + t.Fatal("Expected 4 wants") + } + if wbg.count != 4 { + t.Fatal("Expected 4 want-blocks") + } + + // Remove p0 + pwm.removePeer(p0) + + if g.count != 4 { + t.Fatal("Expected 4 wants") + } + if wbg.count != 2 { + t.Fatal("Expected 2 want-blocks") + } +} diff --git a/internal/providerquerymanager/providerquerymanager.go b/internal/providerquerymanager/providerquerymanager.go new file mode 100644 index 0000000000000000000000000000000000000000..1ec791c279d8e4f196a1887d9412aea45cd83049 --- /dev/null +++ b/internal/providerquerymanager/providerquerymanager.go @@ -0,0 +1,423 @@ +package providerquerymanager + +import ( + "context" + "fmt" + "sync" + "time" + + "gitlab.dms3.io/dms3/go-cid" + logging "gitlab.dms3.io/dms3/go-log" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +var log = logging.Logger("bitswap") + +const ( + maxProviders = 10 + maxInProcessRequests = 6 + defaultTimeout = 10 * time.Second +) + +type inProgressRequestStatus struct { + ctx context.Context + cancelFn func() + providersSoFar []peer.ID + listeners map[chan peer.ID]struct{} +} + +type findProviderRequest struct { + k cid.Cid + ctx context.Context +} + +// ProviderQueryNetwork is an interface for finding providers and connecting to +// peers. +type ProviderQueryNetwork interface { + ConnectTo(context.Context, peer.ID) error + FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID +} + +type providerQueryMessage interface { + debugMessage() string + handle(pqm *ProviderQueryManager) +} + +type receivedProviderMessage struct { + k cid.Cid + p peer.ID +} + +type finishedProviderQueryMessage struct { + k cid.Cid +} + +type newProvideQueryMessage struct { + k cid.Cid + inProgressRequestChan chan<- inProgressRequest +} + +type cancelRequestMessage struct { + incomingProviders chan peer.ID + k cid.Cid +} + +// ProviderQueryManager manages requests to find more providers for blocks +// for bitswap sessions. It's main goals are to: +// - rate limit requests -- don't have too many find provider calls running +// simultaneously +// - connect to found peers and filter them if it can't connect +// - ensure two findprovider calls for the same block don't run concurrently +// - manage timeouts +type ProviderQueryManager struct { + ctx context.Context + network ProviderQueryNetwork + providerQueryMessages chan providerQueryMessage + providerRequestsProcessing chan *findProviderRequest + incomingFindProviderRequests chan *findProviderRequest + + findProviderTimeout time.Duration + timeoutMutex sync.RWMutex + + // do not touch outside the run loop + inProgressRequestStatuses map[cid.Cid]*inProgressRequestStatus +} + +// New initializes a new ProviderQueryManager for a given context and a given +// network provider. 
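+//
+// A typical lifecycle looks roughly like this (sketch only; the network
+// implementation, contexts and key are assumed to come from the caller):
+//
+//	pqm := New(ctx, network)
+//	pqm.Startup()
+//	for p := range pqm.FindProvidersAsync(sessionCtx, key) {
+//		// each p is a provider the manager managed to connect to
+//	}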
+func New(ctx context.Context, network ProviderQueryNetwork) *ProviderQueryManager { + return &ProviderQueryManager{ + ctx: ctx, + network: network, + providerQueryMessages: make(chan providerQueryMessage, 16), + providerRequestsProcessing: make(chan *findProviderRequest), + incomingFindProviderRequests: make(chan *findProviderRequest), + inProgressRequestStatuses: make(map[cid.Cid]*inProgressRequestStatus), + findProviderTimeout: defaultTimeout, + } +} + +// Startup starts processing for the ProviderQueryManager. +func (pqm *ProviderQueryManager) Startup() { + go pqm.run() +} + +type inProgressRequest struct { + providersSoFar []peer.ID + incoming chan peer.ID +} + +// SetFindProviderTimeout changes the timeout for finding providers +func (pqm *ProviderQueryManager) SetFindProviderTimeout(findProviderTimeout time.Duration) { + pqm.timeoutMutex.Lock() + pqm.findProviderTimeout = findProviderTimeout + pqm.timeoutMutex.Unlock() +} + +// FindProvidersAsync finds providers for the given block. +func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, k cid.Cid) <-chan peer.ID { + inProgressRequestChan := make(chan inProgressRequest) + + select { + case pqm.providerQueryMessages <- &newProvideQueryMessage{ + k: k, + inProgressRequestChan: inProgressRequestChan, + }: + case <-pqm.ctx.Done(): + ch := make(chan peer.ID) + close(ch) + return ch + case <-sessionCtx.Done(): + ch := make(chan peer.ID) + close(ch) + return ch + } + + // DO NOT select on sessionCtx. We only want to abort here if we're + // shutting down because we can't actually _cancel_ the request till we + // get to receiveProviders. + var receivedInProgressRequest inProgressRequest + select { + case <-pqm.ctx.Done(): + ch := make(chan peer.ID) + close(ch) + return ch + case receivedInProgressRequest = <-inProgressRequestChan: + } + + return pqm.receiveProviders(sessionCtx, k, receivedInProgressRequest) +} + +func (pqm *ProviderQueryManager) receiveProviders(sessionCtx context.Context, k cid.Cid, receivedInProgressRequest inProgressRequest) <-chan peer.ID { + // maintains an unbuffered queue for incoming providers for given request for a given session + // essentially, as a provider comes in, for a given CID, we want to immediately broadcast to all + // sessions that queried that CID, without worrying about whether the client code is actually + // reading from the returned channel -- so that the broadcast never blocks + // based on: https://medium.com/capital-one-tech/building-an-unbounded-channel-in-go-789e175cd2cd + returnedProviders := make(chan peer.ID) + receivedProviders := append([]peer.ID(nil), receivedInProgressRequest.providersSoFar[0:]...) 
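+	// Note that outgoingProviders() below returns a nil channel while there is
+	// nothing buffered; sending on a nil channel blocks forever in a select, so
+	// that case is effectively disabled until a provider is available to hand out.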
+ incomingProviders := receivedInProgressRequest.incoming + + go func() { + defer close(returnedProviders) + outgoingProviders := func() chan<- peer.ID { + if len(receivedProviders) == 0 { + return nil + } + return returnedProviders + } + nextProvider := func() peer.ID { + if len(receivedProviders) == 0 { + return "" + } + return receivedProviders[0] + } + for len(receivedProviders) > 0 || incomingProviders != nil { + select { + case <-pqm.ctx.Done(): + return + case <-sessionCtx.Done(): + if incomingProviders != nil { + pqm.cancelProviderRequest(k, incomingProviders) + } + return + case provider, ok := <-incomingProviders: + if !ok { + incomingProviders = nil + } else { + receivedProviders = append(receivedProviders, provider) + } + case outgoingProviders() <- nextProvider(): + receivedProviders = receivedProviders[1:] + } + } + }() + return returnedProviders +} + +func (pqm *ProviderQueryManager) cancelProviderRequest(k cid.Cid, incomingProviders chan peer.ID) { + cancelMessageChannel := pqm.providerQueryMessages + for { + select { + case cancelMessageChannel <- &cancelRequestMessage{ + incomingProviders: incomingProviders, + k: k, + }: + cancelMessageChannel = nil + // clear out any remaining providers, in case and "incoming provider" + // messages get processed before our cancel message + case _, ok := <-incomingProviders: + if !ok { + return + } + case <-pqm.ctx.Done(): + return + } + } +} + +func (pqm *ProviderQueryManager) findProviderWorker() { + // findProviderWorker just cycles through incoming provider queries one + // at a time. We have six of these workers running at once + // to let requests go in parallel but keep them rate limited + for { + select { + case fpr, ok := <-pqm.providerRequestsProcessing: + if !ok { + return + } + k := fpr.k + log.Debugf("Beginning Find Provider Request for cid: %s", k.String()) + pqm.timeoutMutex.RLock() + findProviderCtx, cancel := context.WithTimeout(fpr.ctx, pqm.findProviderTimeout) + pqm.timeoutMutex.RUnlock() + providers := pqm.network.FindProvidersAsync(findProviderCtx, k, maxProviders) + wg := &sync.WaitGroup{} + for p := range providers { + wg.Add(1) + go func(p peer.ID) { + defer wg.Done() + err := pqm.network.ConnectTo(findProviderCtx, p) + if err != nil { + log.Debugf("failed to connect to provider %s: %s", p, err) + return + } + select { + case pqm.providerQueryMessages <- &receivedProviderMessage{ + k: k, + p: p, + }: + case <-pqm.ctx.Done(): + return + } + }(p) + } + wg.Wait() + cancel() + select { + case pqm.providerQueryMessages <- &finishedProviderQueryMessage{ + k: k, + }: + case <-pqm.ctx.Done(): + } + case <-pqm.ctx.Done(): + return + } + } +} + +func (pqm *ProviderQueryManager) providerRequestBufferWorker() { + // the provider request buffer worker just maintains an unbounded + // buffer for incoming provider queries and dispatches to the find + // provider workers as they become available + // based on: https://medium.com/capital-one-tech/building-an-unbounded-channel-in-go-789e175cd2cd + var providerQueryRequestBuffer []*findProviderRequest + nextProviderQuery := func() *findProviderRequest { + if len(providerQueryRequestBuffer) == 0 { + return nil + } + return providerQueryRequestBuffer[0] + } + outgoingRequests := func() chan<- *findProviderRequest { + if len(providerQueryRequestBuffer) == 0 { + return nil + } + return pqm.providerRequestsProcessing + } + + for { + select { + case incomingRequest, ok := <-pqm.incomingFindProviderRequests: + if !ok { + return + } + providerQueryRequestBuffer = 
append(providerQueryRequestBuffer, incomingRequest) + case outgoingRequests() <- nextProviderQuery(): + providerQueryRequestBuffer = providerQueryRequestBuffer[1:] + case <-pqm.ctx.Done(): + return + } + } +} + +func (pqm *ProviderQueryManager) cleanupInProcessRequests() { + for _, requestStatus := range pqm.inProgressRequestStatuses { + for listener := range requestStatus.listeners { + close(listener) + } + requestStatus.cancelFn() + } +} + +func (pqm *ProviderQueryManager) run() { + defer pqm.cleanupInProcessRequests() + + go pqm.providerRequestBufferWorker() + for i := 0; i < maxInProcessRequests; i++ { + go pqm.findProviderWorker() + } + + for { + select { + case nextMessage := <-pqm.providerQueryMessages: + log.Debug(nextMessage.debugMessage()) + nextMessage.handle(pqm) + case <-pqm.ctx.Done(): + return + } + } +} + +func (rpm *receivedProviderMessage) debugMessage() string { + return fmt.Sprintf("Received provider (%s) for cid (%s)", rpm.p.String(), rpm.k.String()) +} + +func (rpm *receivedProviderMessage) handle(pqm *ProviderQueryManager) { + requestStatus, ok := pqm.inProgressRequestStatuses[rpm.k] + if !ok { + log.Errorf("Received provider (%s) for cid (%s) not requested", rpm.p.String(), rpm.k.String()) + return + } + requestStatus.providersSoFar = append(requestStatus.providersSoFar, rpm.p) + for listener := range requestStatus.listeners { + select { + case listener <- rpm.p: + case <-pqm.ctx.Done(): + return + } + } +} + +func (fpqm *finishedProviderQueryMessage) debugMessage() string { + return fmt.Sprintf("Finished Provider Query on cid: %s", fpqm.k.String()) +} + +func (fpqm *finishedProviderQueryMessage) handle(pqm *ProviderQueryManager) { + requestStatus, ok := pqm.inProgressRequestStatuses[fpqm.k] + if !ok { + // we canceled the request as it finished. + return + } + for listener := range requestStatus.listeners { + close(listener) + } + delete(pqm.inProgressRequestStatuses, fpqm.k) + requestStatus.cancelFn() +} + +func (npqm *newProvideQueryMessage) debugMessage() string { + return fmt.Sprintf("New Provider Query on cid: %s", npqm.k.String()) +} + +func (npqm *newProvideQueryMessage) handle(pqm *ProviderQueryManager) { + requestStatus, ok := pqm.inProgressRequestStatuses[npqm.k] + if !ok { + ctx, cancelFn := context.WithCancel(pqm.ctx) + requestStatus = &inProgressRequestStatus{ + listeners: make(map[chan peer.ID]struct{}), + ctx: ctx, + cancelFn: cancelFn, + } + pqm.inProgressRequestStatuses[npqm.k] = requestStatus + select { + case pqm.incomingFindProviderRequests <- &findProviderRequest{ + k: npqm.k, + ctx: ctx, + }: + case <-pqm.ctx.Done(): + return + } + } + inProgressChan := make(chan peer.ID) + requestStatus.listeners[inProgressChan] = struct{}{} + select { + case npqm.inProgressRequestChan <- inProgressRequest{ + providersSoFar: requestStatus.providersSoFar, + incoming: inProgressChan, + }: + case <-pqm.ctx.Done(): + } +} + +func (crm *cancelRequestMessage) debugMessage() string { + return fmt.Sprintf("Cancel provider query on cid: %s", crm.k.String()) +} + +func (crm *cancelRequestMessage) handle(pqm *ProviderQueryManager) { + requestStatus, ok := pqm.inProgressRequestStatuses[crm.k] + if !ok { + // Request finished while queued. + return + } + _, ok = requestStatus.listeners[crm.incomingProviders] + if !ok { + // Request finished and _restarted_ while queued. 
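+		// The restarted request allocated a fresh listeners map, so this
+		// channel is no longer tracked and the cancel can be ignored.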
+ return + } + delete(requestStatus.listeners, crm.incomingProviders) + close(crm.incomingProviders) + if len(requestStatus.listeners) == 0 { + delete(pqm.inProgressRequestStatuses, crm.k) + requestStatus.cancelFn() + } +} diff --git a/internal/providerquerymanager/providerquerymanager_test.go b/internal/providerquerymanager/providerquerymanager_test.go new file mode 100644 index 0000000000000000000000000000000000000000..237840195be0209d46013cfd60cde7c9a3d895fc --- /dev/null +++ b/internal/providerquerymanager/providerquerymanager_test.go @@ -0,0 +1,369 @@ +package providerquerymanager + +import ( + "context" + "errors" + "reflect" + "sync" + "testing" + "time" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + + cid "gitlab.dms3.io/dms3/go-cid" + "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +type fakeProviderNetwork struct { + peersFound []peer.ID + connectError error + delay time.Duration + connectDelay time.Duration + queriesMadeMutex sync.RWMutex + queriesMade int + liveQueries int +} + +func (fpn *fakeProviderNetwork) ConnectTo(context.Context, peer.ID) error { + time.Sleep(fpn.connectDelay) + return fpn.connectError +} + +func (fpn *fakeProviderNetwork) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { + fpn.queriesMadeMutex.Lock() + fpn.queriesMade++ + fpn.liveQueries++ + fpn.queriesMadeMutex.Unlock() + incomingPeers := make(chan peer.ID) + go func() { + defer close(incomingPeers) + for _, p := range fpn.peersFound { + time.Sleep(fpn.delay) + select { + case <-ctx.Done(): + return + default: + } + select { + case incomingPeers <- p: + case <-ctx.Done(): + return + } + } + fpn.queriesMadeMutex.Lock() + fpn.liveQueries-- + fpn.queriesMadeMutex.Unlock() + }() + + return incomingPeers +} + +func TestNormalSimultaneousFetch(t *testing.T) { + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + keys := testutil.GenerateCids(2) + + sessionCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) + defer cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[1]) + + var firstPeersReceived []peer.ID + for p := range firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + + var secondPeersReceived []peer.ID + for p := range secondRequestChan { + secondPeersReceived = append(secondPeersReceived, p) + } + + if len(firstPeersReceived) != len(peers) || len(secondPeersReceived) != len(peers) { + t.Fatal("Did not collect all peers for request that was completed") + } + + fpn.queriesMadeMutex.Lock() + defer fpn.queriesMadeMutex.Unlock() + if fpn.queriesMade != 2 { + t.Fatal("Did not dedup provider requests running simultaneously") + } + +} + +func TestDedupingProviderRequests(t *testing.T) { + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + key := testutil.GenerateCids(1)[0] + + sessionCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) + defer cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + + var firstPeersReceived []peer.ID + for p := range 
firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + + var secondPeersReceived []peer.ID + for p := range secondRequestChan { + secondPeersReceived = append(secondPeersReceived, p) + } + + if len(firstPeersReceived) != len(peers) || len(secondPeersReceived) != len(peers) { + t.Fatal("Did not collect all peers for request that was completed") + } + + if !reflect.DeepEqual(firstPeersReceived, secondPeersReceived) { + t.Fatal("Did not receive the same response to both find provider requests") + } + fpn.queriesMadeMutex.Lock() + defer fpn.queriesMadeMutex.Unlock() + if fpn.queriesMade != 1 { + t.Fatal("Did not dedup provider requests running simultaneously") + } +} + +func TestCancelOneRequestDoesNotTerminateAnother(t *testing.T) { + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + + key := testutil.GenerateCids(1)[0] + + // first session will cancel before done + firstSessionCtx, firstCancel := context.WithTimeout(ctx, 3*time.Millisecond) + defer firstCancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(firstSessionCtx, key) + secondSessionCtx, secondCancel := context.WithTimeout(ctx, 100*time.Millisecond) + defer secondCancel() + secondRequestChan := providerQueryManager.FindProvidersAsync(secondSessionCtx, key) + + var firstPeersReceived []peer.ID + for p := range firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + + var secondPeersReceived []peer.ID + for p := range secondRequestChan { + secondPeersReceived = append(secondPeersReceived, p) + } + + if len(secondPeersReceived) != len(peers) { + t.Fatal("Did not collect all peers for request that was completed") + } + + if len(firstPeersReceived) >= len(peers) { + t.Fatal("Collected all peers on cancelled peer, should have been cancelled immediately") + } + fpn.queriesMadeMutex.Lock() + defer fpn.queriesMadeMutex.Unlock() + if fpn.queriesMade != 1 { + t.Fatal("Did not dedup provider requests running simultaneously") + } +} + +func TestCancelManagerExitsGracefully(t *testing.T) { + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + managerCtx, managerCancel := context.WithTimeout(ctx, 5*time.Millisecond) + defer managerCancel() + providerQueryManager := New(managerCtx, fpn) + providerQueryManager.Startup() + + key := testutil.GenerateCids(1)[0] + + sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) + defer cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + + var firstPeersReceived []peer.ID + for p := range firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + + var secondPeersReceived []peer.ID + for p := range secondRequestChan { + secondPeersReceived = append(secondPeersReceived, p) + } + + if len(firstPeersReceived) >= len(peers) || + len(secondPeersReceived) >= len(peers) { + t.Fatal("Did not cancel requests in progress correctly") + } +} + +func TestPeersWithConnectionErrorsNotAddedToPeerList(t *testing.T) { + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + connectError: errors.New("not able to connect"), + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := 
New(ctx, fpn) + providerQueryManager.Startup() + + key := testutil.GenerateCids(1)[0] + + sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) + defer cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + + var firstPeersReceived []peer.ID + for p := range firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + + var secondPeersReceived []peer.ID + for p := range secondRequestChan { + secondPeersReceived = append(secondPeersReceived, p) + } + + if len(firstPeersReceived) != 0 || len(secondPeersReceived) != 0 { + t.Fatal("Did not filter out peers with connection issues") + } + +} + +func TestRateLimitingRequests(t *testing.T) { + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 5 * time.Millisecond, + } + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + + keys := testutil.GenerateCids(maxInProcessRequests + 1) + sessionCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) + defer cancel() + var requestChannels []<-chan peer.ID + for i := 0; i < maxInProcessRequests+1; i++ { + requestChannels = append(requestChannels, providerQueryManager.FindProvidersAsync(sessionCtx, keys[i])) + } + time.Sleep(20 * time.Millisecond) + fpn.queriesMadeMutex.Lock() + if fpn.liveQueries != maxInProcessRequests { + t.Logf("Queries made: %d\n", fpn.liveQueries) + t.Fatal("Did not limit parallel requests to rate limit") + } + fpn.queriesMadeMutex.Unlock() + for i := 0; i < maxInProcessRequests+1; i++ { + for range requestChannels[i] { + } + } + + fpn.queriesMadeMutex.Lock() + defer fpn.queriesMadeMutex.Unlock() + if fpn.queriesMade != maxInProcessRequests+1 { + t.Fatal("Did not make all seperate requests") + } +} + +func TestFindProviderTimeout(t *testing.T) { + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + providerQueryManager.SetFindProviderTimeout(2 * time.Millisecond) + keys := testutil.GenerateCids(1) + + sessionCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) + defer cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) + var firstPeersReceived []peer.ID + for p := range firstRequestChan { + firstPeersReceived = append(firstPeersReceived, p) + } + if len(firstPeersReceived) >= len(peers) { + t.Fatal("Find provider request should have timed out, did not") + } +} + +func TestFindProviderPreCanceled(t *testing.T) { + peers := testutil.GeneratePeers(10) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + providerQueryManager.SetFindProviderTimeout(100 * time.Millisecond) + keys := testutil.GenerateCids(1) + + sessionCtx, cancel := context.WithCancel(ctx) + cancel() + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) + if firstRequestChan == nil { + t.Fatal("expected non-nil channel") + } + select { + case <-firstRequestChan: + case <-time.After(10 * time.Millisecond): + t.Fatal("shouldn't have blocked waiting on a closed context") + } +} + +func TestCancelFindProvidersAfterCompletion(t *testing.T) { 
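+	// Cancelling the session context after the query has already completed
+	// should still close the returned channel promptly.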
+ peers := testutil.GeneratePeers(2) + fpn := &fakeProviderNetwork{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + ctx := context.Background() + providerQueryManager := New(ctx, fpn) + providerQueryManager.Startup() + providerQueryManager.SetFindProviderTimeout(100 * time.Millisecond) + keys := testutil.GenerateCids(1) + + sessionCtx, cancel := context.WithCancel(ctx) + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) + <-firstRequestChan // wait for everything to start. + time.Sleep(10 * time.Millisecond) // wait for the incoming providres to stop. + cancel() // cancel the context. + + timer := time.NewTimer(10 * time.Millisecond) + defer timer.Stop() + for { + select { + case _, ok := <-firstRequestChan: + if !ok { + return + } + case <-timer.C: + t.Fatal("should have finished receiving responses within timeout") + } + } +} diff --git a/internal/session/cidqueue.go b/internal/session/cidqueue.go new file mode 100644 index 0000000000000000000000000000000000000000..dd74008b9ceb5694bf76118b5419363392e72aa5 --- /dev/null +++ b/internal/session/cidqueue.go @@ -0,0 +1,63 @@ +package session + +import cid "gitlab.dms3.io/dms3/go-cid" + +type cidQueue struct { + elems []cid.Cid + eset *cid.Set +} + +func newCidQueue() *cidQueue { + return &cidQueue{eset: cid.NewSet()} +} + +func (cq *cidQueue) Pop() cid.Cid { + for { + if len(cq.elems) == 0 { + return cid.Cid{} + } + + out := cq.elems[0] + cq.elems = cq.elems[1:] + + if cq.eset.Has(out) { + cq.eset.Remove(out) + return out + } + } +} + +func (cq *cidQueue) Cids() []cid.Cid { + // Lazily delete from the list any cids that were removed from the set + if len(cq.elems) > cq.eset.Len() { + i := 0 + for _, c := range cq.elems { + if cq.eset.Has(c) { + cq.elems[i] = c + i++ + } + } + cq.elems = cq.elems[:i] + } + + // Make a copy of the cids + return append([]cid.Cid{}, cq.elems...) 
+} + +func (cq *cidQueue) Push(c cid.Cid) { + if cq.eset.Visit(c) { + cq.elems = append(cq.elems, c) + } +} + +func (cq *cidQueue) Remove(c cid.Cid) { + cq.eset.Remove(c) +} + +func (cq *cidQueue) Has(c cid.Cid) bool { + return cq.eset.Has(c) +} + +func (cq *cidQueue) Len() int { + return cq.eset.Len() +} diff --git a/internal/session/peerresponsetracker.go b/internal/session/peerresponsetracker.go new file mode 100644 index 0000000000000000000000000000000000000000..34bca7c5c00ef10ce7f7ca1d60dbc0413ae673f2 --- /dev/null +++ b/internal/session/peerresponsetracker.go @@ -0,0 +1,70 @@ +package session + +import ( + "math/rand" + + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +// peerResponseTracker keeps track of how many times each peer was the first +// to send us a block for a given CID (used to rank peers) +type peerResponseTracker struct { + firstResponder map[peer.ID]int +} + +func newPeerResponseTracker() *peerResponseTracker { + return &peerResponseTracker{ + firstResponder: make(map[peer.ID]int), + } +} + +// receivedBlockFrom is called when a block is received from a peer +// (only called first time block is received) +func (prt *peerResponseTracker) receivedBlockFrom(from peer.ID) { + prt.firstResponder[from]++ +} + +// choose picks a peer from the list of candidate peers, favouring those peers +// that were first to send us previous blocks +func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID { + if len(peers) == 0 { + return "" + } + + rnd := rand.Float64() + + // Find the total received blocks for all candidate peers + total := 0 + for _, p := range peers { + total += prt.getPeerCount(p) + } + + // Choose one of the peers with a chance proportional to the number + // of blocks received from that peer + counted := 0.0 + for _, p := range peers { + counted += float64(prt.getPeerCount(p)) / float64(total) + if counted > rnd { + return p + } + } + + // We shouldn't get here unless there is some weirdness with floating point + // math that doesn't quite cover the whole range of peers in the for loop + // so just choose the last peer. 
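+	// (Each ratio is computed independently, so the running sum can fall just
+	// short of 1.0 and leave a sliver of rnd values uncovered.)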
+ index := len(peers) - 1 + return peers[index] +} + +// getPeerCount returns the number of times the peer was first to send us a +// block +func (prt *peerResponseTracker) getPeerCount(p peer.ID) int { + count, ok := prt.firstResponder[p] + if ok { + return count + } + + // Make sure there is always at least a small chance a new peer + // will be chosen + return 1 +} diff --git a/internal/session/peerresponsetracker_test.go b/internal/session/peerresponsetracker_test.go new file mode 100644 index 0000000000000000000000000000000000000000..db1f504077d89a0349d18c1be104fb0c05b24d66 --- /dev/null +++ b/internal/session/peerresponsetracker_test.go @@ -0,0 +1,117 @@ +package session + +import ( + "math" + "testing" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +func TestPeerResponseTrackerInit(t *testing.T) { + peers := testutil.GeneratePeers(2) + prt := newPeerResponseTracker() + + if prt.choose([]peer.ID{}) != "" { + t.Fatal("expected empty peer ID") + } + if prt.choose([]peer.ID{peers[0]}) != peers[0] { + t.Fatal("expected single peer ID") + } + p := prt.choose(peers) + if p != peers[0] && p != peers[1] { + t.Fatal("expected randomly chosen peer") + } +} + +func TestPeerResponseTrackerProbabilityUnknownPeers(t *testing.T) { + peers := testutil.GeneratePeers(4) + prt := newPeerResponseTracker() + + choices := []int{0, 0, 0, 0} + count := 1000 + for i := 0; i < count; i++ { + p := prt.choose(peers) + if p == peers[0] { + choices[0]++ + } else if p == peers[1] { + choices[1]++ + } else if p == peers[2] { + choices[2]++ + } else if p == peers[3] { + choices[3]++ + } + } + + for _, c := range choices { + if c == 0 { + t.Fatal("expected each peer to be chosen at least once") + } + if math.Abs(float64(c-choices[0])) > 0.2*float64(count) { + t.Fatal("expected unknown peers to have roughly equal chance of being chosen") + } + } +} + +func TestPeerResponseTrackerProbabilityOneKnownOneUnknownPeer(t *testing.T) { + peers := testutil.GeneratePeers(2) + prt := newPeerResponseTracker() + + prt.receivedBlockFrom(peers[0]) + + chooseFirst := 0 + chooseSecond := 0 + for i := 0; i < 1000; i++ { + p := prt.choose(peers) + if p == peers[0] { + chooseFirst++ + } else if p == peers[1] { + chooseSecond++ + } + } + + if chooseSecond == 0 { + t.Fatal("expected unknown peer to occasionally be chosen") + } + if chooseSecond > chooseFirst { + t.Fatal("expected known peer to be chosen more often") + } +} + +func TestPeerResponseTrackerProbabilityProportional(t *testing.T) { + peers := testutil.GeneratePeers(3) + prt := newPeerResponseTracker() + + probabilities := []float64{0.1, 0.6, 0.3} + count := 1000 + for pi, prob := range probabilities { + for i := 0; float64(i) < float64(count)*prob; i++ { + prt.receivedBlockFrom(peers[pi]) + } + } + + var choices []int + for range probabilities { + choices = append(choices, 0) + } + + for i := 0; i < count; i++ { + p := prt.choose(peers) + if p == peers[0] { + choices[0]++ + } else if p == peers[1] { + choices[1]++ + } else if p == peers[2] { + choices[2]++ + } + } + + for i, c := range choices { + if c == 0 { + t.Fatal("expected each peer to be chosen at least once") + } + if math.Abs(float64(c)-(float64(count)*probabilities[i])) > 0.2*float64(count) { + t.Fatal("expected peers to be chosen proportionally to probability") + } + } +} diff --git a/internal/session/sentwantblockstracker.go b/internal/session/sentwantblockstracker.go new file mode 100644 index 
0000000000000000000000000000000000000000..1e30ea03b27ba17813f034daad642229370c27f1 --- /dev/null +++ b/internal/session/sentwantblockstracker.go @@ -0,0 +1,33 @@ +package session + +import ( + cid "gitlab.dms3.io/dms3/go-cid" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +// sentWantBlocksTracker keeps track of which peers we've sent a want-block to +type sentWantBlocksTracker struct { + sentWantBlocks map[peer.ID]map[cid.Cid]struct{} +} + +func newSentWantBlocksTracker() *sentWantBlocksTracker { + return &sentWantBlocksTracker{ + sentWantBlocks: make(map[peer.ID]map[cid.Cid]struct{}), + } +} + +func (s *sentWantBlocksTracker) addSentWantBlocksTo(p peer.ID, ks []cid.Cid) { + cids, ok := s.sentWantBlocks[p] + if !ok { + cids = make(map[cid.Cid]struct{}, len(ks)) + s.sentWantBlocks[p] = cids + } + for _, c := range ks { + cids[c] = struct{}{} + } +} + +func (s *sentWantBlocksTracker) haveSentWantBlockTo(p peer.ID, c cid.Cid) bool { + _, ok := s.sentWantBlocks[p][c] + return ok +} diff --git a/internal/session/sentwantblockstracker_test.go b/internal/session/sentwantblockstracker_test.go new file mode 100644 index 0000000000000000000000000000000000000000..bbf46177ebaa07d9b0c58f4e394b387cdc6fb180 --- /dev/null +++ b/internal/session/sentwantblockstracker_test.go @@ -0,0 +1,28 @@ +package session + +import ( + "testing" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" +) + +func TestSendWantBlocksTracker(t *testing.T) { + peers := testutil.GeneratePeers(2) + cids := testutil.GenerateCids(2) + swbt := newSentWantBlocksTracker() + + if swbt.haveSentWantBlockTo(peers[0], cids[0]) { + t.Fatal("expected not to have sent anything yet") + } + + swbt.addSentWantBlocksTo(peers[0], cids) + if !swbt.haveSentWantBlockTo(peers[0], cids[0]) { + t.Fatal("expected to have sent cid to peer") + } + if !swbt.haveSentWantBlockTo(peers[0], cids[1]) { + t.Fatal("expected to have sent cid to peer") + } + if swbt.haveSentWantBlockTo(peers[1], cids[0]) { + t.Fatal("expected not to have sent cid to peer") + } +} diff --git a/internal/session/session.go b/internal/session/session.go new file mode 100644 index 0000000000000000000000000000000000000000..2f34b04483adcfebc3bd74ebc994741f01bfac41 --- /dev/null +++ b/internal/session/session.go @@ -0,0 +1,507 @@ +package session + +import ( + "context" + "time" + + bsbpm "gitlab.dms3.io/dms3/go-bitswap/internal/blockpresencemanager" + bsgetter "gitlab.dms3.io/dms3/go-bitswap/internal/getter" + notifications "gitlab.dms3.io/dms3/go-bitswap/internal/notifications" + bspm "gitlab.dms3.io/dms3/go-bitswap/internal/peermanager" + bssim "gitlab.dms3.io/dms3/go-bitswap/internal/sessioninterestmanager" + blocks "gitlab.dms3.io/dms3/go-block-format" + cid "gitlab.dms3.io/dms3/go-cid" + delay "gitlab.dms3.io/dms3/go-dms3-delay" + logging "gitlab.dms3.io/dms3/go-log" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" + loggables "gitlab.dms3.io/p2p/go-p2p-loggables" + "go.uber.org/zap" +) + +var log = logging.Logger("bs:sess") +var sflog = log.Desugar() + +const ( + broadcastLiveWantsLimit = 64 +) + +// PeerManager keeps track of which sessions are interested in which peers +// and takes care of sending wants for the sessions +type PeerManager interface { + // RegisterSession tells the PeerManager that the session is interested + // in a peer's connection state + RegisterSession(peer.ID, bspm.Session) + // UnregisterSession tells the PeerManager that the session is no longer + // interested in a peer's connection state + UnregisterSession(uint64) + // SendWants tells the PeerManager to 
send wants to the given peer + SendWants(ctx context.Context, peerId peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) + // BroadcastWantHaves sends want-haves to all connected peers (used for + // session discovery) + BroadcastWantHaves(context.Context, []cid.Cid) + // SendCancels tells the PeerManager to send cancels to all peers + SendCancels(context.Context, []cid.Cid) +} + +// SessionManager manages all the sessions +type SessionManager interface { + // Remove a session (called when the session shuts down) + RemoveSession(sesid uint64) + // Cancel wants (called when a call to GetBlocks() is cancelled) + CancelSessionWants(sid uint64, wants []cid.Cid) +} + +// SessionPeerManager keeps track of peers in the session +type SessionPeerManager interface { + // PeersDiscovered indicates if any peers have been discovered yet + PeersDiscovered() bool + // Shutdown the SessionPeerManager + Shutdown() + // Adds a peer to the session, returning true if the peer is new + AddPeer(peer.ID) bool + // Removes a peer from the session, returning true if the peer existed + RemovePeer(peer.ID) bool + // All peers in the session + Peers() []peer.ID + // Whether there are any peers in the session + HasPeers() bool + // Protect connection from being pruned by the connection manager + ProtectConnection(peer.ID) +} + +// ProviderFinder is used to find providers for a given key +type ProviderFinder interface { + // FindProvidersAsync searches for peers that provide the given CID + FindProvidersAsync(ctx context.Context, k cid.Cid) <-chan peer.ID +} + +// opType is the kind of operation that is being processed by the event loop +type opType int + +const ( + // Receive blocks + opReceive opType = iota + // Want blocks + opWant + // Cancel wants + opCancel + // Broadcast want-haves + opBroadcast + // Wants sent to peers + opWantsSent +) + +type op struct { + op opType + keys []cid.Cid +} + +// Session holds state for an individual bitswap transfer operation. +// This allows bitswap to make smarter decisions about who to send wantlist +// info to, and who to request blocks from. +type Session struct { + // dependencies + ctx context.Context + shutdown func() + sm SessionManager + pm PeerManager + sprm SessionPeerManager + providerFinder ProviderFinder + sim *bssim.SessionInterestManager + + sw sessionWants + sws sessionWantSender + + latencyTrkr latencyTracker + + // channels + incoming chan op + tickDelayReqs chan time.Duration + + // do not touch outside run loop + idleTick *time.Timer + periodicSearchTimer *time.Timer + baseTickDelay time.Duration + consecutiveTicks int + initialSearchDelay time.Duration + periodicSearchDelay delay.D + // identifiers + notif notifications.PubSub + uuid loggables.DeferredMap + id uint64 + + self peer.ID +} + +// New creates a new bitswap session whose lifetime is bounded by the +// given context. 
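+// Blocks are fetched through GetBlock / GetBlocks; cancelling the context (or
+// calling Shutdown) stops the run loop and removes the session from the
+// SessionManager.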
+func New( + ctx context.Context, + sm SessionManager, + id uint64, + sprm SessionPeerManager, + providerFinder ProviderFinder, + sim *bssim.SessionInterestManager, + pm PeerManager, + bpm *bsbpm.BlockPresenceManager, + notif notifications.PubSub, + initialSearchDelay time.Duration, + periodicSearchDelay delay.D, + self peer.ID) *Session { + + ctx, cancel := context.WithCancel(ctx) + s := &Session{ + sw: newSessionWants(broadcastLiveWantsLimit), + tickDelayReqs: make(chan time.Duration), + ctx: ctx, + shutdown: cancel, + sm: sm, + pm: pm, + sprm: sprm, + providerFinder: providerFinder, + sim: sim, + incoming: make(chan op, 128), + latencyTrkr: latencyTracker{}, + notif: notif, + uuid: loggables.Uuid("GetBlockRequest"), + baseTickDelay: time.Millisecond * 500, + id: id, + initialSearchDelay: initialSearchDelay, + periodicSearchDelay: periodicSearchDelay, + self: self, + } + s.sws = newSessionWantSender(id, pm, sprm, sm, bpm, s.onWantsSent, s.onPeersExhausted) + + go s.run(ctx) + + return s +} + +func (s *Session) ID() uint64 { + return s.id +} + +func (s *Session) Shutdown() { + s.shutdown() +} + +// ReceiveFrom receives incoming blocks from the given peer. +func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { + // The SessionManager tells each Session about all keys that it may be + // interested in. Here the Session filters the keys to the ones that this + // particular Session is interested in. + interestedRes := s.sim.FilterSessionInterested(s.id, ks, haves, dontHaves) + ks = interestedRes[0] + haves = interestedRes[1] + dontHaves = interestedRes[2] + s.logReceiveFrom(from, ks, haves, dontHaves) + + // Inform the session want sender that a message has been received + s.sws.Update(from, ks, haves, dontHaves) + + if len(ks) == 0 { + return + } + + // Inform the session that blocks have been received + select { + case s.incoming <- op{op: opReceive, keys: ks}: + case <-s.ctx.Done(): + } +} + +func (s *Session) logReceiveFrom(from peer.ID, interestedKs []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { + // Save some CPU cycles if log level is higher than debug + if ce := sflog.Check(zap.DebugLevel, "Bitswap <- rcv message"); ce == nil { + return + } + + for _, c := range interestedKs { + log.Debugw("Bitswap <- block", "local", s.self, "from", from, "cid", c, "session", s.id) + } + for _, c := range haves { + log.Debugw("Bitswap <- HAVE", "local", s.self, "from", from, "cid", c, "session", s.id) + } + for _, c := range dontHaves { + log.Debugw("Bitswap <- DONT_HAVE", "local", s.self, "from", from, "cid", c, "session", s.id) + } +} + +// GetBlock fetches a single block. +func (s *Session) GetBlock(parent context.Context, k cid.Cid) (blocks.Block, error) { + return bsgetter.SyncGetBlock(parent, k, s.GetBlocks) +} + +// GetBlocks fetches a set of blocks within the context of this session and +// returns a channel that found blocks will be returned on. No order is +// guaranteed on the returned blocks. +func (s *Session) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { + // ctx = logging.ContextWithLoggable(ctx, s.uuid) + + return bsgetter.AsyncGetBlocks(ctx, s.ctx, keys, s.notif, + func(ctx context.Context, keys []cid.Cid) { + select { + case s.incoming <- op{op: opWant, keys: keys}: + case <-ctx.Done(): + case <-s.ctx.Done(): + } + }, + func(keys []cid.Cid) { + select { + case s.incoming <- op{op: opCancel, keys: keys}: + case <-s.ctx.Done(): + } + }, + ) +} + +// SetBaseTickDelay changes the rate at which ticks happen. 
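+// The new delay is picked up by the run loop and applied the next time the
+// idle timer is reset.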
+func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { + select { + case s.tickDelayReqs <- baseTickDelay: + case <-s.ctx.Done(): + } +} + +// onWantsSent is called when wants are sent to a peer by the session wants sender +func (s *Session) onWantsSent(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { + allBlks := append(wantBlocks[:len(wantBlocks):len(wantBlocks)], wantHaves...) + s.nonBlockingEnqueue(op{op: opWantsSent, keys: allBlks}) +} + +// onPeersExhausted is called when all available peers have sent DONT_HAVE for +// a set of cids (or all peers become unavailable) +func (s *Session) onPeersExhausted(ks []cid.Cid) { + s.nonBlockingEnqueue(op{op: opBroadcast, keys: ks}) +} + +// We don't want to block the sessionWantSender if the incoming channel +// is full. So if we can't immediately send on the incoming channel spin +// it off into a go-routine. +func (s *Session) nonBlockingEnqueue(o op) { + select { + case s.incoming <- o: + default: + go func() { + select { + case s.incoming <- o: + case <-s.ctx.Done(): + } + }() + } +} + +// Session run loop -- everything in this function should not be called +// outside of this loop +func (s *Session) run(ctx context.Context) { + go s.sws.Run() + + s.idleTick = time.NewTimer(s.initialSearchDelay) + s.periodicSearchTimer = time.NewTimer(s.periodicSearchDelay.NextWaitTime()) + for { + select { + case oper := <-s.incoming: + switch oper.op { + case opReceive: + // Received blocks + s.handleReceive(oper.keys) + case opWant: + // Client wants blocks + s.wantBlocks(ctx, oper.keys) + case opCancel: + // Wants were cancelled + s.sw.CancelPending(oper.keys) + s.sws.Cancel(oper.keys) + case opWantsSent: + // Wants were sent to a peer + s.sw.WantsSent(oper.keys) + case opBroadcast: + // Broadcast want-haves to all peers + s.broadcast(ctx, oper.keys) + default: + panic("unhandled operation") + } + case <-s.idleTick.C: + // The session hasn't received blocks for a while, broadcast + s.broadcast(ctx, nil) + case <-s.periodicSearchTimer.C: + // Periodically search for a random live want + s.handlePeriodicSearch(ctx) + case baseTickDelay := <-s.tickDelayReqs: + // Set the base tick delay + s.baseTickDelay = baseTickDelay + case <-ctx.Done(): + // Shutdown + s.handleShutdown() + return + } + } +} + +// Called when the session hasn't received any blocks for some time, or when +// all peers in the session have sent DONT_HAVE for a particular set of CIDs. +// Send want-haves to all connected peers, and search for new peers with the CID. +func (s *Session) broadcast(ctx context.Context, wants []cid.Cid) { + // If this broadcast is because of an idle timeout (we haven't received + // any blocks for a while) then broadcast all pending wants + if wants == nil { + wants = s.sw.PrepareBroadcast() + } + + // Broadcast a want-have for the live wants to everyone we're connected to + s.broadcastWantHaves(ctx, wants) + + // do not find providers on consecutive ticks + // -- just rely on periodic search widening + if len(wants) > 0 && (s.consecutiveTicks == 0) { + // Search for providers who have the first want in the list. + // Typically if the provider has the first block they will have + // the rest of the blocks also. 
+ log.Debugw("FindMorePeers", "session", s.id, "cid", wants[0], "pending", len(wants)) + s.findMorePeers(ctx, wants[0]) + } + s.resetIdleTick() + + // If we have live wants record a consecutive tick + if s.sw.HasLiveWants() { + s.consecutiveTicks++ + } +} + +// handlePeriodicSearch is called periodically to search for providers of a +// randomly chosen CID in the sesssion. +func (s *Session) handlePeriodicSearch(ctx context.Context) { + randomWant := s.sw.RandomLiveWant() + if !randomWant.Defined() { + return + } + + // TODO: come up with a better strategy for determining when to search + // for new providers for blocks. + s.findMorePeers(ctx, randomWant) + + s.broadcastWantHaves(ctx, []cid.Cid{randomWant}) + + s.periodicSearchTimer.Reset(s.periodicSearchDelay.NextWaitTime()) +} + +// findMorePeers attempts to find more peers for a session by searching for +// providers for the given Cid +func (s *Session) findMorePeers(ctx context.Context, c cid.Cid) { + go func(k cid.Cid) { + for p := range s.providerFinder.FindProvidersAsync(ctx, k) { + // When a provider indicates that it has a cid, it's equivalent to + // the providing peer sending a HAVE + s.sws.Update(p, nil, []cid.Cid{c}, nil) + } + }(c) +} + +// handleShutdown is called when the session shuts down +func (s *Session) handleShutdown() { + // Stop the idle timer + s.idleTick.Stop() + // Shut down the session peer manager + s.sprm.Shutdown() + // Shut down the sessionWantSender (blocks until sessionWantSender stops + // sending) + s.sws.Shutdown() + // Signal to the SessionManager that the session has been shutdown + // and can be cleaned up + s.sm.RemoveSession(s.id) +} + +// handleReceive is called when the session receives blocks from a peer +func (s *Session) handleReceive(ks []cid.Cid) { + // Record which blocks have been received and figure out the total latency + // for fetching the blocks + wanted, totalLatency := s.sw.BlocksReceived(ks) + if len(wanted) == 0 { + return + } + + // Record latency + s.latencyTrkr.receiveUpdate(len(wanted), totalLatency) + + // Inform the SessionInterestManager that this session is no longer + // expecting to receive the wanted keys + s.sim.RemoveSessionWants(s.id, wanted) + + s.idleTick.Stop() + + // We've received new wanted blocks, so reset the number of ticks + // that have occurred since the last new block + s.consecutiveTicks = 0 + + s.resetIdleTick() +} + +// wantBlocks is called when blocks are requested by the client +func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) { + if len(newks) > 0 { + // Inform the SessionInterestManager that this session is interested in the keys + s.sim.RecordSessionInterest(s.id, newks) + // Tell the sessionWants tracker that that the wants have been requested + s.sw.BlocksRequested(newks) + // Tell the sessionWantSender that the blocks have been requested + s.sws.Add(newks) + } + + // If we have discovered peers already, the sessionWantSender will + // send wants to them + if s.sprm.PeersDiscovered() { + return + } + + // No peers discovered yet, broadcast some want-haves + ks := s.sw.GetNextWants() + if len(ks) > 0 { + log.Infow("No peers - broadcasting", "session", s.id, "want-count", len(ks)) + s.broadcastWantHaves(ctx, ks) + } +} + +// Send want-haves to all connected peers +func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { + log.Debugw("broadcastWantHaves", "session", s.id, "cids", wants) + s.pm.BroadcastWantHaves(ctx, wants) +} + +// The session will broadcast if it has outstanding wants and doesn't receive 
+// any blocks for some time. +// The length of time is calculated +// - initially +// as a fixed delay +// - once some blocks are received +// from a base delay and average latency, with a backoff +func (s *Session) resetIdleTick() { + var tickDelay time.Duration + if !s.latencyTrkr.hasLatency() { + tickDelay = s.initialSearchDelay + } else { + avLat := s.latencyTrkr.averageLatency() + tickDelay = s.baseTickDelay + (3 * avLat) + } + tickDelay = tickDelay * time.Duration(1+s.consecutiveTicks) + s.idleTick.Reset(tickDelay) +} + +// latencyTracker keeps track of the average latency between sending a want +// and receiving the corresponding block +type latencyTracker struct { + totalLatency time.Duration + count int +} + +func (lt *latencyTracker) hasLatency() bool { + return lt.totalLatency > 0 && lt.count > 0 +} + +func (lt *latencyTracker) averageLatency() time.Duration { + return lt.totalLatency / time.Duration(lt.count) +} + +func (lt *latencyTracker) receiveUpdate(count int, totalLatency time.Duration) { + lt.totalLatency += totalLatency + lt.count += count +} diff --git a/internal/session/session_test.go b/internal/session/session_test.go new file mode 100644 index 0000000000000000000000000000000000000000..44ba502e23250e8affefcecbec6502eb3557d13b --- /dev/null +++ b/internal/session/session_test.go @@ -0,0 +1,592 @@ +package session + +import ( + "context" + "sync" + "testing" + "time" + + bsbpm "gitlab.dms3.io/dms3/go-bitswap/internal/blockpresencemanager" + notifications "gitlab.dms3.io/dms3/go-bitswap/internal/notifications" + bspm "gitlab.dms3.io/dms3/go-bitswap/internal/peermanager" + bssim "gitlab.dms3.io/dms3/go-bitswap/internal/sessioninterestmanager" + bsspm "gitlab.dms3.io/dms3/go-bitswap/internal/sessionpeermanager" + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + cid "gitlab.dms3.io/dms3/go-cid" + blocksutil "gitlab.dms3.io/dms3/go-dms3-blocksutil" + delay "gitlab.dms3.io/dms3/go-dms3-delay" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +type mockSessionMgr struct { + lk sync.Mutex + removeSession bool + cancels []cid.Cid +} + +func newMockSessionMgr() *mockSessionMgr { + return &mockSessionMgr{} +} + +func (msm *mockSessionMgr) removeSessionCalled() bool { + msm.lk.Lock() + defer msm.lk.Unlock() + return msm.removeSession +} + +func (msm *mockSessionMgr) cancelled() []cid.Cid { + msm.lk.Lock() + defer msm.lk.Unlock() + return msm.cancels +} + +func (msm *mockSessionMgr) RemoveSession(sesid uint64) { + msm.lk.Lock() + defer msm.lk.Unlock() + msm.removeSession = true +} + +func (msm *mockSessionMgr) CancelSessionWants(sid uint64, wants []cid.Cid) { + msm.lk.Lock() + defer msm.lk.Unlock() + msm.cancels = append(msm.cancels, wants...) 
+} + +func newFakeSessionPeerManager() *bsspm.SessionPeerManager { + return bsspm.New(1, newFakePeerTagger()) +} + +func newFakePeerTagger() *fakePeerTagger { + return &fakePeerTagger{ + protectedPeers: make(map[peer.ID]map[string]struct{}), + } +} + +type fakePeerTagger struct { + lk sync.Mutex + protectedPeers map[peer.ID]map[string]struct{} +} + +func (fpt *fakePeerTagger) TagPeer(p peer.ID, tag string, val int) {} +func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) {} + +func (fpt *fakePeerTagger) Protect(p peer.ID, tag string) { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + tags, ok := fpt.protectedPeers[p] + if !ok { + tags = make(map[string]struct{}) + fpt.protectedPeers[p] = tags + } + tags[tag] = struct{}{} +} + +func (fpt *fakePeerTagger) Unprotect(p peer.ID, tag string) bool { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + if tags, ok := fpt.protectedPeers[p]; ok { + delete(tags, tag) + return len(tags) > 0 + } + + return false +} + +func (fpt *fakePeerTagger) isProtected(p peer.ID) bool { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + return len(fpt.protectedPeers[p]) > 0 +} + +type fakeProviderFinder struct { + findMorePeersRequested chan cid.Cid +} + +func newFakeProviderFinder() *fakeProviderFinder { + return &fakeProviderFinder{ + findMorePeersRequested: make(chan cid.Cid, 1), + } +} + +func (fpf *fakeProviderFinder) FindProvidersAsync(ctx context.Context, k cid.Cid) <-chan peer.ID { + go func() { + select { + case fpf.findMorePeersRequested <- k: + case <-ctx.Done(): + } + }() + + return make(chan peer.ID) +} + +type wantReq struct { + cids []cid.Cid +} + +type fakePeerManager struct { + wantReqs chan wantReq +} + +func newFakePeerManager() *fakePeerManager { + return &fakePeerManager{ + wantReqs: make(chan wantReq, 1), + } +} + +func (pm *fakePeerManager) RegisterSession(peer.ID, bspm.Session) {} +func (pm *fakePeerManager) UnregisterSession(uint64) {} +func (pm *fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} +func (pm *fakePeerManager) BroadcastWantHaves(ctx context.Context, cids []cid.Cid) { + select { + case pm.wantReqs <- wantReq{cids}: + case <-ctx.Done(): + } +} +func (pm *fakePeerManager) SendCancels(ctx context.Context, cancels []cid.Cid) {} + +func TestSessionGetBlocks(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + fpm := newFakePeerManager() + fspm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() + sim := bssim.New() + bpm := bsbpm.New() + notif := notifications.New() + defer notif.Shutdown() + id := testutil.GenerateSessionID() + sm := newMockSessionMgr() + session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + blockGenerator := blocksutil.NewBlockGenerator() + blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) + var cids []cid.Cid + for _, block := range blks { + cids = append(cids, block.Cid()) + } + + _, err := session.GetBlocks(ctx, cids) + + if err != nil { + t.Fatal("error getting blocks") + } + + // Wait for initial want request + receivedWantReq := <-fpm.wantReqs + + // Should have registered session's interest in blocks + intSes := sim.FilterSessionInterested(id, cids) + if !testutil.MatchKeysIgnoreOrder(intSes[0], cids) { + t.Fatal("did not register session interest in blocks") + } + + // Should have sent out broadcast request for wants + if len(receivedWantReq.cids) != broadcastLiveWantsLimit { + t.Fatal("did not enqueue correct initial number of wants") + } + + // Simulate receiving HAVEs from 
several peers + peers := testutil.GeneratePeers(5) + for i, p := range peers { + blk := blks[testutil.IndexOf(blks, receivedWantReq.cids[i])] + session.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{blk.Cid()}, []cid.Cid{}) + } + + time.Sleep(10 * time.Millisecond) + + // Verify new peers were recorded + if !testutil.MatchPeersIgnoreOrder(fspm.Peers(), peers) { + t.Fatal("peers not recorded by the peer manager") + } + + // Verify session still wants received blocks + _, unwanted := sim.SplitWantedUnwanted(blks) + if len(unwanted) > 0 { + t.Fatal("all blocks should still be wanted") + } + + // Simulate receiving DONT_HAVE for a CID + session.ReceiveFrom(peers[0], []cid.Cid{}, []cid.Cid{}, []cid.Cid{blks[0].Cid()}) + + time.Sleep(10 * time.Millisecond) + + // Verify session still wants received blocks + _, unwanted = sim.SplitWantedUnwanted(blks) + if len(unwanted) > 0 { + t.Fatal("all blocks should still be wanted") + } + + // Simulate receiving block for a CID + session.ReceiveFrom(peers[1], []cid.Cid{blks[0].Cid()}, []cid.Cid{}, []cid.Cid{}) + + time.Sleep(10 * time.Millisecond) + + // Verify session no longer wants received block + wanted, unwanted := sim.SplitWantedUnwanted(blks) + if len(unwanted) != 1 || !unwanted[0].Cid().Equals(blks[0].Cid()) { + t.Fatal("session wants block that has already been received") + } + if len(wanted) != len(blks)-1 { + t.Fatal("session wants incorrect number of blocks") + } + + // Shut down session + cancel() + + time.Sleep(10 * time.Millisecond) + + // Verify session was removed + if !sm.removeSessionCalled() { + t.Fatal("expected session to be removed") + } +} + +func TestSessionFindMorePeers(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 900*time.Millisecond) + defer cancel() + fpm := newFakePeerManager() + fspm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() + sim := bssim.New() + bpm := bsbpm.New() + notif := notifications.New() + defer notif.Shutdown() + id := testutil.GenerateSessionID() + sm := newMockSessionMgr() + session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + session.SetBaseTickDelay(200 * time.Microsecond) + blockGenerator := blocksutil.NewBlockGenerator() + blks := blockGenerator.Blocks(broadcastLiveWantsLimit * 2) + var cids []cid.Cid + for _, block := range blks { + cids = append(cids, block.Cid()) + } + _, err := session.GetBlocks(ctx, cids) + if err != nil { + t.Fatal("error getting blocks") + } + + // The session should initially broadcast want-haves + select { + case <-fpm.wantReqs: + case <-ctx.Done(): + t.Fatal("Did not make first want request ") + } + + // receive a block to trigger a tick reset + time.Sleep(20 * time.Millisecond) // need to make sure some latency registers + // or there will be no tick set -- time precision on Windows in go is in the + // millisecond range + p := testutil.GeneratePeers(1)[0] + + blk := blks[0] + session.ReceiveFrom(p, []cid.Cid{blk.Cid()}, []cid.Cid{}, []cid.Cid{}) + + // The session should now time out waiting for a response and broadcast + // want-haves again + select { + case <-fpm.wantReqs: + case <-ctx.Done(): + t.Fatal("Did not make second want request ") + } + + // The session should keep broadcasting periodically until it receives a response + select { + case receivedWantReq := <-fpm.wantReqs: + if len(receivedWantReq.cids) != broadcastLiveWantsLimit { + t.Fatal("did not rebroadcast whole live list") + } + // Make sure the first block is not included because it has already + // been received + for 
_, c := range receivedWantReq.cids {
+			if c.Equals(cids[0]) {
+				t.Fatal("should not broadcast block that was already received")
+			}
+		}
+	case <-ctx.Done():
+		t.Fatal("Never rebroadcast want list")
+	}
+
+	// The session should eventually try to find more peers
+	select {
+	case <-fpf.findMorePeersRequested:
+	case <-ctx.Done():
+		t.Fatal("Did not find more peers")
+	}
+}
+
+func TestSessionOnPeersExhausted(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
+	defer cancel()
+	fpm := newFakePeerManager()
+	fspm := newFakeSessionPeerManager()
+	fpf := newFakeProviderFinder()
+
+	sim := bssim.New()
+	bpm := bsbpm.New()
+	notif := notifications.New()
+	defer notif.Shutdown()
+	id := testutil.GenerateSessionID()
+	sm := newMockSessionMgr()
+	session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "")
+	blockGenerator := blocksutil.NewBlockGenerator()
+	blks := blockGenerator.Blocks(broadcastLiveWantsLimit + 5)
+	var cids []cid.Cid
+	for _, block := range blks {
+		cids = append(cids, block.Cid())
+	}
+	_, err := session.GetBlocks(ctx, cids)
+
+	if err != nil {
+		t.Fatal("error getting blocks")
+	}
+
+	// Wait for initial want request
+	receivedWantReq := <-fpm.wantReqs
+
+	// Should have sent out broadcast request for wants
+	if len(receivedWantReq.cids) != broadcastLiveWantsLimit {
+		t.Fatal("did not enqueue correct initial number of wants")
+	}
+
+	// Signal that all peers have sent DONT_HAVE for two of the wants
+	session.onPeersExhausted(cids[len(cids)-2:])
+
+	// Wait for want request
+	receivedWantReq = <-fpm.wantReqs
+
+	// Should have sent out broadcast request for wants
+	if len(receivedWantReq.cids) != 2 {
+		t.Fatal("did not enqueue correct number of wants")
+	}
+}
+
+func TestSessionFailingToGetFirstBlock(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	defer cancel()
+	fpm := newFakePeerManager()
+	fspm := newFakeSessionPeerManager()
+	fpf := newFakeProviderFinder()
+	sim := bssim.New()
+	bpm := bsbpm.New()
+	notif := notifications.New()
+	defer notif.Shutdown()
+	id := testutil.GenerateSessionID()
+	sm := newMockSessionMgr()
+	session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, 10*time.Millisecond, delay.Fixed(100*time.Millisecond), "")
+	blockGenerator := blocksutil.NewBlockGenerator()
+	blks := blockGenerator.Blocks(4)
+	var cids []cid.Cid
+	for _, block := range blks {
+		cids = append(cids, block.Cid())
+	}
+	startTick := time.Now()
+	_, err := session.GetBlocks(ctx, cids)
+	if err != nil {
+		t.Fatal("error getting blocks")
+	}
+
+	// The session should initially broadcast want-haves
+	select {
+	case <-fpm.wantReqs:
+	case <-ctx.Done():
+		t.Fatal("Did not make first want request ")
+	}
+
+	// Verify a broadcast was made
+	select {
+	case receivedWantReq := <-fpm.wantReqs:
+		if len(receivedWantReq.cids) < len(cids) {
+			t.Fatal("did not rebroadcast whole live list")
+		}
+	case <-ctx.Done():
+		t.Fatal("Never rebroadcast want list")
+	}
+
+	// Wait for a request to find more peers to occur
+	select {
+	case k := <-fpf.findMorePeersRequested:
+		if testutil.IndexOf(blks, k) == -1 {
+			t.Fatal("did not rebroadcast an active want")
+		}
+	case <-ctx.Done():
+		t.Fatal("Did not find more peers")
+	}
+	firstTickLength := time.Since(startTick)
+
+	// Wait for another broadcast to occur
+	select {
+	case receivedWantReq := <-fpm.wantReqs:
+		if len(receivedWantReq.cids) < len(cids) {
+			t.Fatal("did not rebroadcast whole live list")
+		}
+	case 
<-ctx.Done(): + t.Fatal("Never rebroadcast want list") + } + + // Wait for another broadcast to occur + startTick = time.Now() + select { + case receivedWantReq := <-fpm.wantReqs: + if len(receivedWantReq.cids) < len(cids) { + t.Fatal("did not rebroadcast whole live list") + } + case <-ctx.Done(): + t.Fatal("Never rebroadcast want list") + } + + // Tick should take longer + consecutiveTickLength := time.Since(startTick) + if firstTickLength > consecutiveTickLength { + t.Fatal("Should have increased tick length after first consecutive tick") + } + + // Wait for another broadcast to occur + startTick = time.Now() + select { + case receivedWantReq := <-fpm.wantReqs: + if len(receivedWantReq.cids) < len(cids) { + t.Fatal("did not rebroadcast whole live list") + } + case <-ctx.Done(): + t.Fatal("Never rebroadcast want list") + } + + // Tick should take longer + secondConsecutiveTickLength := time.Since(startTick) + if consecutiveTickLength > secondConsecutiveTickLength { + t.Fatal("Should have increased tick length after first consecutive tick") + } + + // Should not have tried to find peers on consecutive ticks + select { + case <-fpf.findMorePeersRequested: + t.Fatal("Should not have tried to find peers on consecutive ticks") + default: + } + + // Wait for rebroadcast to occur + select { + case k := <-fpf.findMorePeersRequested: + if testutil.IndexOf(blks, k) == -1 { + t.Fatal("did not rebroadcast an active want") + } + case <-ctx.Done(): + t.Fatal("Did not rebroadcast to find more peers") + } +} + +func TestSessionCtxCancelClosesGetBlocksChannel(t *testing.T) { + fpm := newFakePeerManager() + fspm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() + sim := bssim.New() + bpm := bsbpm.New() + notif := notifications.New() + defer notif.Shutdown() + id := testutil.GenerateSessionID() + sm := newMockSessionMgr() + + // Create a new session with its own context + sessctx, sesscancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + session := New(sessctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + + timerCtx, timerCancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer timerCancel() + + // Request a block with a new context + blockGenerator := blocksutil.NewBlockGenerator() + blks := blockGenerator.Blocks(1) + getctx, getcancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer getcancel() + + getBlocksCh, err := session.GetBlocks(getctx, []cid.Cid{blks[0].Cid()}) + if err != nil { + t.Fatal("error getting blocks") + } + + // Cancel the session context + sesscancel() + + // Expect the GetBlocks() channel to be closed + select { + case _, ok := <-getBlocksCh: + if ok { + t.Fatal("expected channel to be closed but was not closed") + } + case <-timerCtx.Done(): + t.Fatal("expected channel to be closed before timeout") + } + + time.Sleep(10 * time.Millisecond) + + // Expect RemoveSession to be called + if !sm.removeSessionCalled() { + t.Fatal("expected onShutdown to be called") + } +} + +func TestSessionOnShutdownCalled(t *testing.T) { + fpm := newFakePeerManager() + fspm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() + sim := bssim.New() + bpm := bsbpm.New() + notif := notifications.New() + defer notif.Shutdown() + id := testutil.GenerateSessionID() + sm := newMockSessionMgr() + + // Create a new session with its own context + sessctx, sesscancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer sesscancel() + session := New(sessctx, sm, 
id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + + // Shutdown the session + session.Shutdown() + + time.Sleep(10 * time.Millisecond) + + // Expect RemoveSession to be called + if !sm.removeSessionCalled() { + t.Fatal("expected onShutdown to be called") + } +} + +func TestSessionReceiveMessageAfterCtxCancel(t *testing.T) { + ctx, cancelCtx := context.WithTimeout(context.Background(), 20*time.Millisecond) + fpm := newFakePeerManager() + fspm := newFakeSessionPeerManager() + fpf := newFakeProviderFinder() + + sim := bssim.New() + bpm := bsbpm.New() + notif := notifications.New() + defer notif.Shutdown() + id := testutil.GenerateSessionID() + sm := newMockSessionMgr() + session := New(ctx, sm, id, fspm, fpf, sim, fpm, bpm, notif, time.Second, delay.Fixed(time.Minute), "") + blockGenerator := blocksutil.NewBlockGenerator() + blks := blockGenerator.Blocks(2) + cids := []cid.Cid{blks[0].Cid(), blks[1].Cid()} + + _, err := session.GetBlocks(ctx, cids) + if err != nil { + t.Fatal("error getting blocks") + } + + // Wait for initial want request + <-fpm.wantReqs + + // Shut down session + cancelCtx() + + // Simulate receiving block for a CID + peer := testutil.GeneratePeers(1)[0] + session.ReceiveFrom(peer, []cid.Cid{blks[0].Cid()}, []cid.Cid{}, []cid.Cid{}) + + time.Sleep(5 * time.Millisecond) + + // If we don't get a panic then the test is considered passing +} diff --git a/internal/session/sessionwants.go b/internal/session/sessionwants.go new file mode 100644 index 0000000000000000000000000000000000000000..10027dffda934aee8dab450646d10c2056374eca --- /dev/null +++ b/internal/session/sessionwants.go @@ -0,0 +1,193 @@ +package session + +import ( + "fmt" + "math/rand" + "time" + + cid "gitlab.dms3.io/dms3/go-cid" +) + +// liveWantsOrder and liveWants will get out of sync as blocks are received. +// This constant is the maximum amount to allow them to be out of sync before +// cleaning up the ordering array. +const liveWantsOrderGCLimit = 32 + +// sessionWants keeps track of which cids are waiting to be sent out, and which +// peers are "live" - ie, we've sent a request but haven't received a block yet +type sessionWants struct { + // The wants that have not yet been sent out + toFetch *cidQueue + // Wants that have been sent but have not received a response + liveWants map[cid.Cid]time.Time + // The order in which wants were requested + liveWantsOrder []cid.Cid + // The maximum number of want-haves to send in a broadcast + broadcastLimit int +} + +func newSessionWants(broadcastLimit int) sessionWants { + return sessionWants{ + toFetch: newCidQueue(), + liveWants: make(map[cid.Cid]time.Time), + broadcastLimit: broadcastLimit, + } +} + +func (sw *sessionWants) String() string { + return fmt.Sprintf("%d pending / %d live", sw.toFetch.Len(), len(sw.liveWants)) +} + +// BlocksRequested is called when the client makes a request for blocks +func (sw *sessionWants) BlocksRequested(newWants []cid.Cid) { + for _, k := range newWants { + sw.toFetch.Push(k) + } +} + +// GetNextWants is called when the session has not yet discovered peers with +// the blocks that it wants. It moves as many CIDs from the fetch queue to +// the live wants queue as possible (given the broadcast limit). +// Returns the newly live wants. 
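+//
+// Illustrative sketch (c1, c2, c3 are hypothetical CIDs; broadcast limit 2):
+//
+//	sw := newSessionWants(2)
+//	sw.BlocksRequested([]cid.Cid{c1, c2, c3})
+//	live := sw.GetNextWants() // live == [c1 c2]; c3 stays in the fetch queue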
+func (sw *sessionWants) GetNextWants() []cid.Cid { + now := time.Now() + + // Move CIDs from fetch queue to the live wants queue (up to the broadcast + // limit) + currentLiveCount := len(sw.liveWants) + toAdd := sw.broadcastLimit - currentLiveCount + + var live []cid.Cid + for ; toAdd > 0 && sw.toFetch.Len() > 0; toAdd-- { + c := sw.toFetch.Pop() + live = append(live, c) + sw.liveWantsOrder = append(sw.liveWantsOrder, c) + sw.liveWants[c] = now + } + + return live +} + +// WantsSent is called when wants are sent to a peer +func (sw *sessionWants) WantsSent(ks []cid.Cid) { + now := time.Now() + for _, c := range ks { + if _, ok := sw.liveWants[c]; !ok && sw.toFetch.Has(c) { + sw.toFetch.Remove(c) + sw.liveWantsOrder = append(sw.liveWantsOrder, c) + sw.liveWants[c] = now + } + } +} + +// BlocksReceived removes received block CIDs from the live wants list and +// measures latency. It returns the CIDs of blocks that were actually +// wanted (as opposed to duplicates) and the total latency for all incoming blocks. +func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) { + wanted := make([]cid.Cid, 0, len(ks)) + totalLatency := time.Duration(0) + if len(ks) == 0 { + return wanted, totalLatency + } + + // Filter for blocks that were actually wanted (as opposed to duplicates) + now := time.Now() + for _, c := range ks { + if sw.isWanted(c) { + wanted = append(wanted, c) + + // Measure latency + sentAt, ok := sw.liveWants[c] + if ok && !sentAt.IsZero() { + totalLatency += now.Sub(sentAt) + } + + // Remove the CID from the live wants / toFetch queue + delete(sw.liveWants, c) + sw.toFetch.Remove(c) + } + } + + // If the live wants ordering array is a long way out of sync with the + // live wants map, clean up the ordering array + if len(sw.liveWantsOrder)-len(sw.liveWants) > liveWantsOrderGCLimit { + cleaned := sw.liveWantsOrder[:0] + for _, c := range sw.liveWantsOrder { + if _, ok := sw.liveWants[c]; ok { + cleaned = append(cleaned, c) + } + } + sw.liveWantsOrder = cleaned + } + + return wanted, totalLatency +} + +// PrepareBroadcast saves the current time for each live want and returns the +// live want CIDs up to the broadcast limit. +func (sw *sessionWants) PrepareBroadcast() []cid.Cid { + now := time.Now() + live := make([]cid.Cid, 0, len(sw.liveWants)) + for _, c := range sw.liveWantsOrder { + if _, ok := sw.liveWants[c]; ok { + // No response was received for the want, so reset the sent time + // to now as we're about to broadcast + sw.liveWants[c] = now + + live = append(live, c) + if len(live) == sw.broadcastLimit { + break + } + } + } + + return live +} + +// CancelPending removes the given CIDs from the fetch queue. 
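+// Wants that have already gone live are not affected here; only pending
+// (not yet sent) wants are dropped.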
+func (sw *sessionWants) CancelPending(keys []cid.Cid) { + for _, k := range keys { + sw.toFetch.Remove(k) + } +} + +// LiveWants returns a list of live wants +func (sw *sessionWants) LiveWants() []cid.Cid { + live := make([]cid.Cid, 0, len(sw.liveWants)) + for c := range sw.liveWants { + live = append(live, c) + } + + return live +} + +// RandomLiveWant returns a randomly selected live want +func (sw *sessionWants) RandomLiveWant() cid.Cid { + if len(sw.liveWants) == 0 { + return cid.Cid{} + } + + // picking a random live want + i := rand.Intn(len(sw.liveWants)) + for k := range sw.liveWants { + if i == 0 { + return k + } + i-- + } + return cid.Cid{} +} + +// Has live wants indicates if there are any live wants +func (sw *sessionWants) HasLiveWants() bool { + return len(sw.liveWants) > 0 +} + +// Indicates whether the want is in either of the fetch or live queues +func (sw *sessionWants) isWanted(c cid.Cid) bool { + _, ok := sw.liveWants[c] + if !ok { + ok = sw.toFetch.Has(c) + } + return ok +} diff --git a/internal/session/sessionwants_test.go b/internal/session/sessionwants_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7aec938fe92d8e634aa90319fccf7483d487c62c --- /dev/null +++ b/internal/session/sessionwants_test.go @@ -0,0 +1,189 @@ +package session + +import ( + "testing" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + cid "gitlab.dms3.io/dms3/go-cid" +) + +func TestEmptySessionWants(t *testing.T) { + sw := newSessionWants(broadcastLiveWantsLimit) + + // Expect these functions to return nothing on a new sessionWants + lws := sw.PrepareBroadcast() + if len(lws) > 0 { + t.Fatal("expected no broadcast wants") + } + lws = sw.LiveWants() + if len(lws) > 0 { + t.Fatal("expected no live wants") + } + if sw.HasLiveWants() { + t.Fatal("expected not to have live wants") + } + rw := sw.RandomLiveWant() + if rw.Defined() { + t.Fatal("expected no random want") + } +} + +func TestSessionWants(t *testing.T) { + sw := newSessionWants(5) + cids := testutil.GenerateCids(10) + others := testutil.GenerateCids(1) + + // Add 10 new wants + // toFetch Live + // 9876543210 + sw.BlocksRequested(cids) + + // Get next wants with a limit of 5 + // The first 5 cids should go move into the live queue + // toFetch Live + // 98765 43210 + nextw := sw.GetNextWants() + if len(nextw) != 5 { + t.Fatal("expected 5 next wants") + } + lws := sw.PrepareBroadcast() + if len(lws) != 5 { + t.Fatal("expected 5 broadcast wants", len(lws)) + } + lws = sw.LiveWants() + if len(lws) != 5 { + t.Fatal("expected 5 live wants") + } + if !sw.HasLiveWants() { + t.Fatal("expected to have live wants") + } + rw := sw.RandomLiveWant() + if !rw.Defined() { + t.Fatal("expected random want") + } + + // Two wanted blocks and one other block are received. + // The wanted blocks should be removed from the live wants queue + // (the other block CID should be ignored) + // toFetch Live + // 98765 432__ + recvdCids := []cid.Cid{cids[0], cids[1], others[0]} + sw.BlocksReceived(recvdCids) + lws = sw.LiveWants() + if len(lws) != 3 { + t.Fatal("expected 3 live wants") + } + + // Ask for next wants with a limit of 5 + // Should move 2 wants from toFetch queue to live wants + // toFetch Live + // 987__ 65432 + nextw = sw.GetNextWants() + if len(nextw) != 2 { + t.Fatal("expected 2 next wants") + } + lws = sw.LiveWants() + if len(lws) != 5 { + t.Fatal("expected 5 live wants") + } + + // One wanted block and one dup block are received. + // The wanted block should be removed from the live + // wants queue. 
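+	// (cids[0] is a duplicate of a block received above, so only cids[3]
+	// is removed from the live wants)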
+ // toFetch Live + // 987 654_2 + recvdCids = []cid.Cid{cids[0], cids[3]} + sw.BlocksReceived(recvdCids) + lws = sw.LiveWants() + if len(lws) != 4 { + t.Fatal("expected 4 live wants") + } + + // One block in the toFetch queue should be cancelled + // toFetch Live + // 9_7 654_2 + sw.CancelPending([]cid.Cid{cids[8]}) + lws = sw.LiveWants() + if len(lws) != 4 { + t.Fatal("expected 4 live wants") + } +} + +func TestPrepareBroadcast(t *testing.T) { + sw := newSessionWants(3) + cids := testutil.GenerateCids(10) + + // Add 6 new wants + // toFetch Live + // 543210 + sw.BlocksRequested(cids[:6]) + + // Get next wants with a limit of 3 + // The first 3 cids should go move into the live queue + // toFetch Live + // 543 210 + sw.GetNextWants() + + // Broadcast should contain wants in order + for i := 0; i < 10; i++ { + ws := sw.PrepareBroadcast() + if len(ws) != 3 { + t.Fatal("should broadcast all live wants") + } + for idx, c := range ws { + if !c.Equals(cids[idx]) { + t.Fatal("broadcast should always return wants in order") + } + } + } + + // One block received + // Remove a cid from the live queue + sw.BlocksReceived(cids[:1]) + // toFetch Live + // 543 21_ + + // Add 4 new wants + // toFetch Live + // 9876543 21 + sw.BlocksRequested(cids[6:]) + + // 2 Wants sent + // toFetch Live + // 98765 4321 + sw.WantsSent(cids[3:5]) + + // Broadcast should contain wants in order + cids = cids[1:] + for i := 0; i < 10; i++ { + ws := sw.PrepareBroadcast() + if len(ws) != 3 { + t.Fatal("should broadcast live wants up to limit", len(ws), len(cids)) + } + for idx, c := range ws { + if !c.Equals(cids[idx]) { + t.Fatal("broadcast should always return wants in order") + } + } + } +} + +// Test that even after GC broadcast returns correct wants +func TestPrepareBroadcastAfterGC(t *testing.T) { + sw := newSessionWants(5) + cids := testutil.GenerateCids(liveWantsOrderGCLimit * 2) + + sw.BlocksRequested(cids) + + // Trigger a sessionWants internal GC of the live wants + sw.BlocksReceived(cids[:liveWantsOrderGCLimit+1]) + cids = cids[:liveWantsOrderGCLimit+1] + + // Broadcast should contain wants in order + ws := sw.PrepareBroadcast() + for i, c := range ws { + if !c.Equals(cids[i]) { + t.Fatal("broadcast should always return wants in order") + } + } +} diff --git a/internal/session/sessionwantsender.go b/internal/session/sessionwantsender.go new file mode 100644 index 0000000000000000000000000000000000000000..b2530bf8aed3d472806b96a370bdebbee5d4852d --- /dev/null +++ b/internal/session/sessionwantsender.go @@ -0,0 +1,768 @@ +package session + +import ( + "context" + + bsbpm "gitlab.dms3.io/dms3/go-bitswap/internal/blockpresencemanager" + + cid "gitlab.dms3.io/dms3/go-cid" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +const ( + // Maximum number of changes to accept before blocking + changesBufferSize = 128 + // If the session receives this many DONT_HAVEs in a row from a peer, + // it prunes the peer from the session + peerDontHaveLimit = 16 +) + +// BlockPresence indicates whether a peer has a block. +// Note that the order is important, we decide which peer to send a want to +// based on knowing whether peer has the block. 
For example, we're more likely to send
+// a want to a peer that has the block than to one that doesn't, so
+// BPHave > BPDontHave.
+type BlockPresence int
+
+const (
+	BPDontHave BlockPresence = iota
+	BPUnknown
+	BPHave
+)
+
+// SessionWantsCanceller provides a method to cancel wants
+type SessionWantsCanceller interface {
+	// Cancel wants for this session
+	CancelSessionWants(sid uint64, wants []cid.Cid)
+}
+
+// update encapsulates a message received by the session
+type update struct {
+	// Which peer sent the update
+	from peer.ID
+	// cids of blocks received
+	ks []cid.Cid
+	// HAVE message
+	haves []cid.Cid
+	// DONT_HAVE message
+	dontHaves []cid.Cid
+}
+
+// peerAvailability indicates a peer's connection state
+type peerAvailability struct {
+	target    peer.ID
+	available bool
+}
+
+// change can be new wants, a new message received by the session,
+// or a change in the connect status of a peer
+type change struct {
+	// new wants requested
+	add []cid.Cid
+	// wants cancelled
+	cancel []cid.Cid
+	// new message received by session (blocks / HAVEs / DONT_HAVEs)
+	update update
+	// peer has connected / disconnected
+	availability peerAvailability
+}
+
+type onSendFn func(to peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid)
+type onPeersExhaustedFn func([]cid.Cid)
+
+//
+// sessionWantSender is responsible for sending want-have and want-block to
+// peers. For each want, it sends a single optimistic want-block request to
+// one peer and want-have requests to all other peers in the session.
+// To choose the best peer for the optimistic want-block it maintains a list
+// of how peers have responded to each want (HAVE / DONT_HAVE / Unknown) and
+// consults the peer response tracker (records which peers sent us blocks).
+//
+type sessionWantSender struct {
+	// The context is used when sending wants
+	ctx context.Context
+	// Called to shut down the sessionWantSender
+	shutdown func()
+	// The sessionWantSender uses the closed channel to signal when it's
+	// finished shutting down
+	closed chan struct{}
+	// The session ID
+	sessionID uint64
+	// A channel that collects incoming changes (events)
+	changes chan change
+	// Information about each want indexed by CID
+	wants map[cid.Cid]*wantInfo
+	// Keeps track of how many consecutive DONT_HAVEs a peer has sent
+	peerConsecutiveDontHaves map[peer.ID]int
+	// Tracks which peers we have sent want-blocks to
+	swbt *sentWantBlocksTracker
+	// Tracks the number of blocks each peer sent us
+	peerRspTrkr *peerResponseTracker
+	// Sends wants to peers
+	pm PeerManager
+	// Keeps track of peers in the session
+	spm SessionPeerManager
+	// Cancels wants
+	canceller SessionWantsCanceller
+	// Keeps track of which peer has / doesn't have a block
+	bpm *bsbpm.BlockPresenceManager
+	// Called when wants are sent
+	onSend onSendFn
+	// Called when all peers explicitly don't have a block
+	onPeersExhausted onPeersExhaustedFn
+}
+
+func newSessionWantSender(sid uint64, pm PeerManager, spm SessionPeerManager, canceller SessionWantsCanceller,
+	bpm *bsbpm.BlockPresenceManager, onSend onSendFn, onPeersExhausted onPeersExhaustedFn) sessionWantSender {
+
+	ctx, cancel := context.WithCancel(context.Background())
+	sws := sessionWantSender{
+		ctx:                      ctx,
+		shutdown:                 cancel,
+		closed:                   make(chan struct{}),
+		sessionID:                sid,
+		changes:                  make(chan change, changesBufferSize),
+		wants:                    make(map[cid.Cid]*wantInfo),
+		peerConsecutiveDontHaves: make(map[peer.ID]int),
+		swbt:                     newSentWantBlocksTracker(),
+		peerRspTrkr:              newPeerResponseTracker(),
+
+		pm:               pm,
+		spm:              
spm, + canceller: canceller, + bpm: bpm, + onSend: onSend, + onPeersExhausted: onPeersExhausted, + } + + return sws +} + +func (sws *sessionWantSender) ID() uint64 { + return sws.sessionID +} + +// Add is called when new wants are added to the session +func (sws *sessionWantSender) Add(ks []cid.Cid) { + if len(ks) == 0 { + return + } + sws.addChange(change{add: ks}) +} + +// Cancel is called when a request is cancelled +func (sws *sessionWantSender) Cancel(ks []cid.Cid) { + if len(ks) == 0 { + return + } + sws.addChange(change{cancel: ks}) +} + +// Update is called when the session receives a message with incoming blocks +// or HAVE / DONT_HAVE +func (sws *sessionWantSender) Update(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { + hasUpdate := len(ks) > 0 || len(haves) > 0 || len(dontHaves) > 0 + if !hasUpdate { + return + } + + sws.addChange(change{ + update: update{from, ks, haves, dontHaves}, + }) +} + +// SignalAvailability is called by the PeerManager to signal that a peer has +// connected / disconnected +func (sws *sessionWantSender) SignalAvailability(p peer.ID, isAvailable bool) { + availability := peerAvailability{p, isAvailable} + // Add the change in a non-blocking manner to avoid the possibility of a + // deadlock + sws.addChangeNonBlocking(change{availability: availability}) +} + +// Run is the main loop for processing incoming changes +func (sws *sessionWantSender) Run() { + for { + select { + case ch := <-sws.changes: + sws.onChange([]change{ch}) + case <-sws.ctx.Done(): + // Unregister the session with the PeerManager + sws.pm.UnregisterSession(sws.sessionID) + + // Close the 'closed' channel to signal to Shutdown() that the run + // loop has exited + close(sws.closed) + return + } + } +} + +// Shutdown the sessionWantSender +func (sws *sessionWantSender) Shutdown() { + // Signal to the run loop to stop processing + sws.shutdown() + // Wait for run loop to complete + <-sws.closed +} + +// addChange adds a new change to the queue +func (sws *sessionWantSender) addChange(c change) { + select { + case sws.changes <- c: + case <-sws.ctx.Done(): + } +} + +// addChangeNonBlocking adds a new change to the queue, using a go-routine +// if the change blocks, so as to avoid potential deadlocks +func (sws *sessionWantSender) addChangeNonBlocking(c change) { + select { + case sws.changes <- c: + default: + // changes channel is full, so add change in a go routine instead + go func() { + select { + case sws.changes <- c: + case <-sws.ctx.Done(): + } + }() + } +} + +// collectChanges collects all the changes that have occurred since the last +// invocation of onChange +func (sws *sessionWantSender) collectChanges(changes []change) []change { + for len(changes) < changesBufferSize { + select { + case next := <-sws.changes: + changes = append(changes, next) + default: + return changes + } + } + return changes +} + +// onChange processes the next set of changes +func (sws *sessionWantSender) onChange(changes []change) { + // Several changes may have been recorded since the last time we checked, + // so pop all outstanding changes from the channel + changes = sws.collectChanges(changes) + + // Apply each change + availability := make(map[peer.ID]bool, len(changes)) + cancels := make([]cid.Cid, 0) + var updates []update + for _, chng := range changes { + // Initialize info for new wants + for _, c := range chng.add { + sws.trackWant(c) + } + + // Remove cancelled wants + for _, c := range chng.cancel { + sws.untrackWant(c) + cancels = append(cancels, c) + } + + // 
Consolidate updates and changes to availability + if chng.update.from != "" { + // If the update includes blocks or haves, treat it as signaling that + // the peer is available + if len(chng.update.ks) > 0 || len(chng.update.haves) > 0 { + p := chng.update.from + availability[p] = true + + // Register with the PeerManager + sws.pm.RegisterSession(p, sws) + } + + updates = append(updates, chng.update) + } + if chng.availability.target != "" { + availability[chng.availability.target] = chng.availability.available + } + } + + // Update peer availability + newlyAvailable, newlyUnavailable := sws.processAvailability(availability) + + // Update wants + dontHaves := sws.processUpdates(updates) + + // Check if there are any wants for which all peers have indicated they + // don't have the want + sws.checkForExhaustedWants(dontHaves, newlyUnavailable) + + // If there are any cancels, send them + if len(cancels) > 0 { + sws.canceller.CancelSessionWants(sws.sessionID, cancels) + } + + // If there are some connected peers, send any pending wants + if sws.spm.HasPeers() { + sws.sendNextWants(newlyAvailable) + } +} + +// processAvailability updates the want queue with any changes in +// peer availability +// It returns the peers that have become +// - newly available +// - newly unavailable +func (sws *sessionWantSender) processAvailability(availability map[peer.ID]bool) (avail []peer.ID, unavail []peer.ID) { + var newlyAvailable []peer.ID + var newlyUnavailable []peer.ID + for p, isNowAvailable := range availability { + stateChange := false + if isNowAvailable { + isNewPeer := sws.spm.AddPeer(p) + if isNewPeer { + stateChange = true + newlyAvailable = append(newlyAvailable, p) + } + } else { + wasAvailable := sws.spm.RemovePeer(p) + if wasAvailable { + stateChange = true + newlyUnavailable = append(newlyUnavailable, p) + } + } + + // If the state has changed + if stateChange { + sws.updateWantsPeerAvailability(p, isNowAvailable) + // Reset the count of consecutive DONT_HAVEs received from the + // peer + delete(sws.peerConsecutiveDontHaves, p) + } + } + + return newlyAvailable, newlyUnavailable +} + +// trackWant creates a new entry in the map of CID -> want info +func (sws *sessionWantSender) trackWant(c cid.Cid) { + if _, ok := sws.wants[c]; ok { + return + } + + // Create the want info + wi := newWantInfo(sws.peerRspTrkr) + sws.wants[c] = wi + + // For each available peer, register any information we know about + // whether the peer has the block + for _, p := range sws.spm.Peers() { + sws.updateWantBlockPresence(c, p) + } +} + +// untrackWant removes an entry from the map of CID -> want info +func (sws *sessionWantSender) untrackWant(c cid.Cid) { + delete(sws.wants, c) +} + +// processUpdates processes incoming blocks and HAVE / DONT_HAVEs. +// It returns all DONT_HAVEs. 
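+// As a side effect it records which peer was first to send each wanted block
+// (protecting that peer's connection), and prunes peers that reach
+// peerDontHaveLimit consecutive DONT_HAVEs without having anything we want.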
+func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { + // Process received blocks keys + blkCids := cid.NewSet() + for _, upd := range updates { + for _, c := range upd.ks { + blkCids.Add(c) + + // Remove the want + removed := sws.removeWant(c) + if removed != nil { + // Inform the peer tracker that this peer was the first to send + // us the block + sws.peerRspTrkr.receivedBlockFrom(upd.from) + + // Protect the connection to this peer so that we can ensure + // that the connection doesn't get pruned by the connection + // manager + sws.spm.ProtectConnection(upd.from) + } + delete(sws.peerConsecutiveDontHaves, upd.from) + } + } + + // Process received DONT_HAVEs + dontHaves := cid.NewSet() + prunePeers := make(map[peer.ID]struct{}) + for _, upd := range updates { + for _, c := range upd.dontHaves { + // Track the number of consecutive DONT_HAVEs each peer receives + if sws.peerConsecutiveDontHaves[upd.from] == peerDontHaveLimit { + prunePeers[upd.from] = struct{}{} + } else { + sws.peerConsecutiveDontHaves[upd.from]++ + } + + // If we already received a block for the want, there's no need to + // update block presence etc + if blkCids.Has(c) { + continue + } + + dontHaves.Add(c) + + // Update the block presence for the peer + sws.updateWantBlockPresence(c, upd.from) + + // Check if the DONT_HAVE is in response to a want-block + // (could also be in response to want-have) + if sws.swbt.haveSentWantBlockTo(upd.from, c) { + // If we were waiting for a response from this peer, clear + // sentTo so that we can send the want to another peer + if sentTo, ok := sws.getWantSentTo(c); ok && sentTo == upd.from { + sws.setWantSentTo(c, "") + } + } + } + } + + // Process received HAVEs + for _, upd := range updates { + for _, c := range upd.haves { + // If we haven't already received a block for the want + if !blkCids.Has(c) { + // Update the block presence for the peer + sws.updateWantBlockPresence(c, upd.from) + } + + // Clear the consecutive DONT_HAVE count for the peer + delete(sws.peerConsecutiveDontHaves, upd.from) + delete(prunePeers, upd.from) + } + } + + // If any peers have sent us too many consecutive DONT_HAVEs, remove them + // from the session + for p := range prunePeers { + // Before removing the peer from the session, check if the peer + // sent us a HAVE for a block that we want + for c := range sws.wants { + if sws.bpm.PeerHasBlock(p, c) { + delete(prunePeers, p) + break + } + } + } + if len(prunePeers) > 0 { + go func() { + for p := range prunePeers { + // Peer doesn't have anything we want, so remove it + log.Infof("peer %s sent too many dont haves, removing from session %d", p, sws.ID()) + sws.SignalAvailability(p, false) + } + }() + } + + return dontHaves.Keys() +} + +// checkForExhaustedWants checks if there are any wants for which all peers +// have sent a DONT_HAVE. We call these "exhausted" wants. 
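+// Newly exhausted wants are reported to the session via onPeersExhausted()
+// (see processExhaustedWants below).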
+func (sws *sessionWantSender) checkForExhaustedWants(dontHaves []cid.Cid, newlyUnavailable []peer.ID) { + // If there are no new DONT_HAVEs, and no peers became unavailable, then + // we don't need to check for exhausted wants + if len(dontHaves) == 0 && len(newlyUnavailable) == 0 { + return + } + + // We need to check each want for which we just received a DONT_HAVE + wants := dontHaves + + // If a peer just became unavailable, then we need to check all wants + // (because it may be the last peer who hadn't sent a DONT_HAVE for a CID) + if len(newlyUnavailable) > 0 { + // Collect all pending wants + wants = make([]cid.Cid, len(sws.wants)) + for c := range sws.wants { + wants = append(wants, c) + } + + // If the last available peer in the session has become unavailable + // then we need to broadcast all pending wants + if !sws.spm.HasPeers() { + sws.processExhaustedWants(wants) + return + } + } + + // If all available peers for a cid sent a DONT_HAVE, signal to the session + // that we've exhausted available peers + if len(wants) > 0 { + exhausted := sws.bpm.AllPeersDoNotHaveBlock(sws.spm.Peers(), wants) + sws.processExhaustedWants(exhausted) + } +} + +// processExhaustedWants filters the list so that only those wants that haven't +// already been marked as exhausted are passed to onPeersExhausted() +func (sws *sessionWantSender) processExhaustedWants(exhausted []cid.Cid) { + newlyExhausted := sws.newlyExhausted(exhausted) + if len(newlyExhausted) > 0 { + sws.onPeersExhausted(newlyExhausted) + } +} + +// convenience structs for passing around want-blocks and want-haves for a peer +type wantSets struct { + wantBlocks *cid.Set + wantHaves *cid.Set +} + +type allWants map[peer.ID]*wantSets + +func (aw allWants) forPeer(p peer.ID) *wantSets { + if _, ok := aw[p]; !ok { + aw[p] = &wantSets{ + wantBlocks: cid.NewSet(), + wantHaves: cid.NewSet(), + } + } + return aw[p] +} + +// sendNextWants sends wants to peers according to the latest information +// about which peers have / dont have blocks +func (sws *sessionWantSender) sendNextWants(newlyAvailable []peer.ID) { + toSend := make(allWants) + + for c, wi := range sws.wants { + // Ensure we send want-haves to any newly available peers + for _, p := range newlyAvailable { + toSend.forPeer(p).wantHaves.Add(c) + } + + // We already sent a want-block to a peer and haven't yet received a + // response yet + if wi.sentTo != "" { + continue + } + + // All the peers have indicated that they don't have the block + // corresponding to this want, so we must wait to discover more peers + if wi.bestPeer == "" { + // TODO: work this out in real time instead of using bestP? + continue + } + + // Record that we are sending a want-block for this want to the peer + sws.setWantSentTo(c, wi.bestPeer) + + // Send a want-block to the chosen peer + toSend.forPeer(wi.bestPeer).wantBlocks.Add(c) + + // Send a want-have to each other peer + for _, op := range sws.spm.Peers() { + if op != wi.bestPeer { + toSend.forPeer(op).wantHaves.Add(c) + } + } + } + + // Send any wants we've collected + sws.sendWants(toSend) +} + +// sendWants sends want-have and want-blocks to the appropriate peers +func (sws *sessionWantSender) sendWants(sends allWants) { + // For each peer we're sending a request to + for p, snd := range sends { + // Piggyback some other want-haves onto the request to the peer + for _, c := range sws.getPiggybackWantHaves(p, snd.wantBlocks) { + snd.wantHaves.Add(c) + } + + // Send the wants to the peer. 
+ // Note that the PeerManager ensures that we don't sent duplicate + // want-haves / want-blocks to a peer, and that want-blocks take + // precedence over want-haves. + wblks := snd.wantBlocks.Keys() + whaves := snd.wantHaves.Keys() + sws.pm.SendWants(sws.ctx, p, wblks, whaves) + + // Inform the session that we've sent the wants + sws.onSend(p, wblks, whaves) + + // Record which peers we send want-block to + sws.swbt.addSentWantBlocksTo(p, wblks) + } +} + +// getPiggybackWantHaves gets the want-haves that should be piggybacked onto +// a request that we are making to send want-blocks to a peer +func (sws *sessionWantSender) getPiggybackWantHaves(p peer.ID, wantBlocks *cid.Set) []cid.Cid { + var whs []cid.Cid + for c := range sws.wants { + // Don't send want-have if we're already sending a want-block + // (or have previously) + if !wantBlocks.Has(c) && !sws.swbt.haveSentWantBlockTo(p, c) { + whs = append(whs, c) + } + } + return whs +} + +// newlyExhausted filters the list of keys for wants that have not already +// been marked as exhausted (all peers indicated they don't have the block) +func (sws *sessionWantSender) newlyExhausted(ks []cid.Cid) []cid.Cid { + var res []cid.Cid + for _, c := range ks { + if wi, ok := sws.wants[c]; ok { + if !wi.exhausted { + res = append(res, c) + wi.exhausted = true + } + } + } + return res +} + +// removeWant is called when the corresponding block is received +func (sws *sessionWantSender) removeWant(c cid.Cid) *wantInfo { + if wi, ok := sws.wants[c]; ok { + delete(sws.wants, c) + return wi + } + return nil +} + +// updateWantsPeerAvailability is called when the availability changes for a +// peer. It updates all the wants accordingly. +func (sws *sessionWantSender) updateWantsPeerAvailability(p peer.ID, isNowAvailable bool) { + for c, wi := range sws.wants { + if isNowAvailable { + sws.updateWantBlockPresence(c, p) + } else { + wi.removePeer(p) + } + } +} + +// updateWantBlockPresence is called when a HAVE / DONT_HAVE is received for the given +// want / peer +func (sws *sessionWantSender) updateWantBlockPresence(c cid.Cid, p peer.ID) { + wi, ok := sws.wants[c] + if !ok { + return + } + + // If the peer sent us a HAVE or DONT_HAVE for the cid, adjust the + // block presence for the peer / cid combination + if sws.bpm.PeerHasBlock(p, c) { + wi.setPeerBlockPresence(p, BPHave) + } else if sws.bpm.PeerDoesNotHaveBlock(p, c) { + wi.setPeerBlockPresence(p, BPDontHave) + } else { + wi.setPeerBlockPresence(p, BPUnknown) + } +} + +// Which peer was the want sent to +func (sws *sessionWantSender) getWantSentTo(c cid.Cid) (peer.ID, bool) { + if wi, ok := sws.wants[c]; ok { + return wi.sentTo, true + } + return "", false +} + +// Record which peer the want was sent to +func (sws *sessionWantSender) setWantSentTo(c cid.Cid, p peer.ID) { + if wi, ok := sws.wants[c]; ok { + wi.sentTo = p + } +} + +// wantInfo keeps track of the information for a want +type wantInfo struct { + // Tracks HAVE / DONT_HAVE sent to us for the want by each peer + blockPresence map[peer.ID]BlockPresence + // The peer that we've sent a want-block to (cleared when we get a response) + sentTo peer.ID + // The "best" peer to send the want to next + bestPeer peer.ID + // Keeps track of how many hits / misses each peer has sent us for wants + // in the session + peerRspTrkr *peerResponseTracker + // true if all known peers have sent a DONT_HAVE for this want + exhausted bool +} + +// func newWantInfo(prt *peerResponseTracker, c cid.Cid, startIndex int) *wantInfo { +func newWantInfo(prt 
*peerResponseTracker) *wantInfo { + return &wantInfo{ + blockPresence: make(map[peer.ID]BlockPresence), + peerRspTrkr: prt, + exhausted: false, + } +} + +// setPeerBlockPresence sets the block presence for the given peer +func (wi *wantInfo) setPeerBlockPresence(p peer.ID, bp BlockPresence) { + wi.blockPresence[p] = bp + wi.calculateBestPeer() + + // If a peer informed us that it has a block then make sure the want is no + // longer flagged as exhausted (exhausted means no peers have the block) + if bp == BPHave { + wi.exhausted = false + } +} + +// removePeer deletes the given peer from the want info +func (wi *wantInfo) removePeer(p peer.ID) { + // If we were waiting to hear back from the peer that is being removed, + // clear the sentTo field so we no longer wait + if p == wi.sentTo { + wi.sentTo = "" + } + delete(wi.blockPresence, p) + wi.calculateBestPeer() +} + +// calculateBestPeer finds the best peer to send the want to next +func (wi *wantInfo) calculateBestPeer() { + // Recalculate the best peer + bestBP := BPDontHave + bestPeer := peer.ID("") + + // Find the peer with the best block presence, recording how many peers + // share the block presence + countWithBest := 0 + for p, bp := range wi.blockPresence { + if bp > bestBP { + bestBP = bp + bestPeer = p + countWithBest = 1 + } else if bp == bestBP { + countWithBest++ + } + } + wi.bestPeer = bestPeer + + // If no peer has a block presence better than DONT_HAVE, bail out + if bestPeer == "" { + return + } + + // If there was only one peer with the best block presence, we're done + if countWithBest <= 1 { + return + } + + // There were multiple peers with the best block presence, so choose one of + // them to be the best + var peersWithBest []peer.ID + for p, bp := range wi.blockPresence { + if bp == bestBP { + peersWithBest = append(peersWithBest, p) + } + } + wi.bestPeer = wi.peerRspTrkr.choose(peersWithBest) +} diff --git a/internal/session/sessionwantsender_test.go b/internal/session/sessionwantsender_test.go new file mode 100644 index 0000000000000000000000000000000000000000..03cd5aa39702640e429d475fafdee351cbdd7b90 --- /dev/null +++ b/internal/session/sessionwantsender_test.go @@ -0,0 +1,913 @@ +package session + +import ( + "context" + "sync" + "testing" + "time" + + bsbpm "gitlab.dms3.io/dms3/go-bitswap/internal/blockpresencemanager" + bspm "gitlab.dms3.io/dms3/go-bitswap/internal/peermanager" + bsspm "gitlab.dms3.io/dms3/go-bitswap/internal/sessionpeermanager" + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + cid "gitlab.dms3.io/dms3/go-cid" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +type sentWants struct { + sync.Mutex + p peer.ID + wantHaves *cid.Set + wantBlocks *cid.Set +} + +func (sw *sentWants) add(wantBlocks []cid.Cid, wantHaves []cid.Cid) { + sw.Lock() + defer sw.Unlock() + + for _, c := range wantBlocks { + sw.wantBlocks.Add(c) + } + for _, c := range wantHaves { + if !sw.wantBlocks.Has(c) { + sw.wantHaves.Add(c) + } + } + +} +func (sw *sentWants) wantHavesKeys() []cid.Cid { + sw.Lock() + defer sw.Unlock() + return sw.wantHaves.Keys() +} +func (sw *sentWants) wantBlocksKeys() []cid.Cid { + sw.Lock() + defer sw.Unlock() + return sw.wantBlocks.Keys() +} + +type mockPeerManager struct { + lk sync.Mutex + peerSessions map[peer.ID]bspm.Session + peerSends map[peer.ID]*sentWants +} + +func newMockPeerManager() *mockPeerManager { + return &mockPeerManager{ + peerSessions: make(map[peer.ID]bspm.Session), + peerSends: make(map[peer.ID]*sentWants), + } +} + +func (pm *mockPeerManager) RegisterSession(p peer.ID, 
sess bspm.Session) { + pm.lk.Lock() + defer pm.lk.Unlock() + + pm.peerSessions[p] = sess +} + +func (pm *mockPeerManager) has(p peer.ID, sid uint64) bool { + pm.lk.Lock() + defer pm.lk.Unlock() + + if session, ok := pm.peerSessions[p]; ok { + return session.ID() == sid + } + return false +} + +func (*mockPeerManager) UnregisterSession(uint64) {} +func (*mockPeerManager) BroadcastWantHaves(context.Context, []cid.Cid) {} +func (*mockPeerManager) SendCancels(context.Context, []cid.Cid) {} + +func (pm *mockPeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { + pm.lk.Lock() + defer pm.lk.Unlock() + + sw, ok := pm.peerSends[p] + if !ok { + sw = &sentWants{p: p, wantHaves: cid.NewSet(), wantBlocks: cid.NewSet()} + pm.peerSends[p] = sw + } + sw.add(wantBlocks, wantHaves) +} + +func (pm *mockPeerManager) waitNextWants() map[peer.ID]*sentWants { + time.Sleep(10 * time.Millisecond) + + pm.lk.Lock() + defer pm.lk.Unlock() + nw := make(map[peer.ID]*sentWants) + for p, sentWants := range pm.peerSends { + nw[p] = sentWants + } + return nw +} + +func (pm *mockPeerManager) clearWants() { + pm.lk.Lock() + defer pm.lk.Unlock() + + for p := range pm.peerSends { + delete(pm.peerSends, p) + } +} + +type exhaustedPeers struct { + lk sync.Mutex + ks []cid.Cid +} + +func (ep *exhaustedPeers) onPeersExhausted(ks []cid.Cid) { + ep.lk.Lock() + defer ep.lk.Unlock() + + ep.ks = append(ep.ks, ks...) +} + +func (ep *exhaustedPeers) clear() { + ep.lk.Lock() + defer ep.lk.Unlock() + + ep.ks = nil +} + +func (ep *exhaustedPeers) exhausted() []cid.Cid { + ep.lk.Lock() + defer ep.lk.Unlock() + + return append([]cid.Cid{}, ep.ks...) +} + +func TestSendWants(t *testing.T) { + cids := testutil.GenerateCids(4) + peers := testutil.GeneratePeers(1) + peerA := peers[0] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // add cid0, cid1 + blkCids0 := cids[0:2] + spm.Add(blkCids0) + // peerA: HAVE cid0 + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) + + // Wait for processing to complete + peerSends := pm.waitNextWants() + + // Should have sent + // peerA: want-block cid0, cid1 + sw, ok := peerSends[peerA] + if !ok { + t.Fatal("Nothing sent to peer") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), blkCids0) { + t.Fatal("Wrong keys") + } + if len(sw.wantHavesKeys()) > 0 { + t.Fatal("Expecting no want-haves") + } +} + +func TestSendsWantBlockToOnePeerOnly(t *testing.T) { + cids := testutil.GenerateCids(4) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // add cid0, cid1 + blkCids0 := cids[0:2] + spm.Add(blkCids0) + // peerA: HAVE cid0 + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) + + // Wait for processing to complete + peerSends := pm.waitNextWants() + + // Should have sent + // peerA: want-block cid0, cid1 + sw, ok := peerSends[peerA] + if !ok { + t.Fatal("Nothing sent 
to peer") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), blkCids0) { + t.Fatal("Wrong keys") + } + + // Clear wants (makes keeping track of what's been sent easier) + pm.clearWants() + + // peerB: HAVE cid0 + spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) + + // Wait for processing to complete + peerSends = pm.waitNextWants() + + // Have not received response from peerA, so should not send want-block to + // peerB. Should have sent + // peerB: want-have cid0, cid1 + sw, ok = peerSends[peerB] + if !ok { + t.Fatal("Nothing sent to peer") + } + if sw.wantBlocks.Len() > 0 { + t.Fatal("Expecting no want-blocks") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantHavesKeys(), blkCids0) { + t.Fatal("Wrong keys") + } +} + +func TestReceiveBlock(t *testing.T) { + cids := testutil.GenerateCids(2) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // add cid0, cid1 + spm.Add(cids) + // peerA: HAVE cid0 + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) + + // Wait for processing to complete + peerSends := pm.waitNextWants() + + // Should have sent + // peerA: want-block cid0, cid1 + sw, ok := peerSends[peerA] + if !ok { + t.Fatal("Nothing sent to peer") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { + t.Fatal("Wrong keys") + } + + // Clear wants (makes keeping track of what's been sent easier) + pm.clearWants() + + // peerA: block cid0, DONT_HAVE cid1 + bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]}) + spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{cids[1]}) + // peerB: HAVE cid0, cid1 + bpm.ReceiveFrom(peerB, cids, []cid.Cid{}) + spm.Update(peerB, []cid.Cid{}, cids, []cid.Cid{}) + + // Wait for processing to complete + peerSends = pm.waitNextWants() + + // Should have sent + // peerB: want-block cid1 + // (should not have sent want-block for cid0 because block0 has already + // been received) + sw, ok = peerSends[peerB] + if !ok { + t.Fatal("Nothing sent to peer") + } + wb := sw.wantBlocksKeys() + if len(wb) != 1 || !wb[0].Equals(cids[1]) { + t.Fatal("Wrong keys", wb) + } +} + +func TestCancelWants(t *testing.T) { + cids := testutil.GenerateCids(4) + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // add cid0, cid1, cid2 + blkCids := cids[0:3] + spm.Add(blkCids) + + time.Sleep(5 * time.Millisecond) + + // cancel cid0, cid2 + cancelCids := []cid.Cid{cids[0], cids[2]} + spm.Cancel(cancelCids) + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Should have sent cancels for cid0, cid2 + sent := swc.cancelled() + if !testutil.MatchKeysIgnoreOrder(sent, cancelCids) { + t.Fatal("Wrong keys") + } +} + +func TestRegisterSessionWithPeerManager(t *testing.T) { + cids := testutil.GenerateCids(2) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + fpm := 
newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // peerA: HAVE cid0 + spm.Update(peerA, nil, cids[:1], nil) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Expect session to have been registered with PeerManager + if !pm.has(peerA, sid) { + t.Fatal("Expected HAVE to register session with PeerManager") + } + + // peerB: block cid1 + spm.Update(peerB, cids[1:], nil, nil) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Expect session to have been registered with PeerManager + if !pm.has(peerB, sid) { + t.Fatal("Expected HAVE to register session with PeerManager") + } +} + +func TestProtectConnFirstPeerToSendWantedBlock(t *testing.T) { + cids := testutil.GenerateCids(2) + peers := testutil.GeneratePeers(3) + peerA := peers[0] + peerB := peers[1] + peerC := peers[2] + sid := uint64(1) + pm := newMockPeerManager() + fpt := newFakePeerTagger() + fpm := bsspm.New(1, fpt) + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // add cid0 + spm.Add(cids[:1]) + + // peerA: block cid0 + spm.Update(peerA, cids[:1], nil, nil) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Expect peer A to be protected as it was first to send the block + if !fpt.isProtected(peerA) { + t.Fatal("Expected first peer to send block to have protected connection") + } + + // peerB: block cid0 + spm.Update(peerB, cids[:1], nil, nil) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Expect peer B not to be protected as it was not first to send the block + if fpt.isProtected(peerB) { + t.Fatal("Expected peer not to be protected") + } + + // peerC: block cid1 + spm.Update(peerC, cids[1:], nil, nil) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Expect peer C not to be protected as we didn't want the block it sent + if fpt.isProtected(peerC) { + t.Fatal("Expected peer not to be protected") + } +} + +func TestPeerUnavailable(t *testing.T) { + cids := testutil.GenerateCids(2) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // add cid0, cid1 + spm.Add(cids) + // peerA: HAVE cid0 + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) + + // Wait for processing to complete + peerSends := pm.waitNextWants() + + // Should have sent + // peerA: want-block cid0, cid1 + sw, ok := peerSends[peerA] + if !ok { + t.Fatal("Nothing sent to peer") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { + t.Fatal("Wrong keys") + } + + // Clear wants (makes keeping track of what's been sent easier) + pm.clearWants() + + // peerB: HAVE cid0 + spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) + + // Wait for processing to 
complete + peerSends = pm.waitNextWants() + + // Should not have sent anything because want-blocks were already sent to + // peer A + sw, ok = peerSends[peerB] + if ok && sw.wantBlocks.Len() > 0 { + t.Fatal("Expected no wants sent to peer") + } + + // peerA becomes unavailable + spm.SignalAvailability(peerA, false) + + // Wait for processing to complete + peerSends = pm.waitNextWants() + + // Should now have sent want-block cid0, cid1 to peerB + sw, ok = peerSends[peerB] + if !ok { + t.Fatal("Nothing sent to peer") + } + if !testutil.MatchKeysIgnoreOrder(sw.wantBlocksKeys(), cids) { + t.Fatal("Wrong keys") + } +} + +func TestPeersExhausted(t *testing.T) { + cids := testutil.GenerateCids(3) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + + ep := exhaustedPeers{} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, ep.onPeersExhausted) + + go spm.Run() + + // add cid0, cid1 + spm.Add(cids) + + // peerA: HAVE cid0 + bpm.ReceiveFrom(peerA, []cid.Cid{cids[0]}, []cid.Cid{}) + // Note: this also registers peer A as being available + spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{}) + + // peerA: DONT_HAVE cid1 + bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]}) + spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[1]}) + + time.Sleep(5 * time.Millisecond) + + // All available peers (peer A) have sent us a DONT_HAVE for cid1, + // so expect that onPeersExhausted() will be called with cid1 + if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1]}) { + t.Fatal("Wrong keys") + } + + // Clear exhausted cids + ep.clear() + + // peerB: HAVE cid0 + bpm.ReceiveFrom(peerB, []cid.Cid{cids[0]}, []cid.Cid{}) + // Note: this also registers peer B as being available + spm.Update(peerB, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{}) + + // peerB: DONT_HAVE cid1, cid2 + bpm.ReceiveFrom(peerB, []cid.Cid{}, []cid.Cid{cids[1], cids[2]}) + spm.Update(peerB, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[1], cids[2]}) + + // Wait for processing to complete + pm.waitNextWants() + + // All available peers (peer A and peer B) have sent us a DONT_HAVE + // for cid1, but we already called onPeersExhausted with cid1, so it + // should not be called again + if len(ep.exhausted()) > 0 { + t.Fatal("Wrong keys") + } + + // peerA: DONT_HAVE cid2 + bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[2]}) + spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[2]}) + + // Wait for processing to complete + pm.waitNextWants() + + // All available peers (peer A and peer B) have sent us a DONT_HAVE for + // cid2, so expect that onPeersExhausted() will be called with cid2 + if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[2]}) { + t.Fatal("Wrong keys") + } +} + +// Tests that when +// - all the peers except one have sent a DONT_HAVE for a CID +// - the remaining peer becomes unavailable +// onPeersExhausted should be sent for that CID +func TestPeersExhaustedLastWaitingPeerUnavailable(t *testing.T) { + cids := testutil.GenerateCids(2) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + + ep := exhaustedPeers{} + spm := newSessionWantSender(sid, pm, fpm, 
swc, bpm, onSend, ep.onPeersExhausted) + + go spm.Run() + + // add cid0, cid1 + spm.Add(cids) + + // peerA: HAVE cid0 + bpm.ReceiveFrom(peerA, []cid.Cid{cids[0]}, []cid.Cid{}) + // Note: this also registers peer A as being available + spm.Update(peerA, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) + // peerB: HAVE cid0 + bpm.ReceiveFrom(peerB, []cid.Cid{cids[0]}, []cid.Cid{}) + // Note: this also registers peer B as being available + spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) + + // peerA: DONT_HAVE cid1 + bpm.ReceiveFrom(peerA, []cid.Cid{}, []cid.Cid{cids[1]}) + spm.Update(peerA, []cid.Cid{}, []cid.Cid{}, []cid.Cid{cids[0]}) + + time.Sleep(5 * time.Millisecond) + + // peerB: becomes unavailable + spm.SignalAvailability(peerB, false) + + time.Sleep(5 * time.Millisecond) + + // All remaining peers (peer A) have sent us a DONT_HAVE for cid1, + // so expect that onPeersExhausted() will be called with cid1 + if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1]}) { + t.Fatal("Wrong keys") + } +} + +// Tests that when all the peers are removed from the session +// onPeersExhausted should be called with all outstanding CIDs +func TestPeersExhaustedAllPeersUnavailable(t *testing.T) { + cids := testutil.GenerateCids(3) + peers := testutil.GeneratePeers(2) + peerA := peers[0] + peerB := peers[1] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + + ep := exhaustedPeers{} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, ep.onPeersExhausted) + + go spm.Run() + + // add cid0, cid1, cid2 + spm.Add(cids) + + // peerA: receive block for cid0 (and register peer A with sessionWantSender) + spm.Update(peerA, []cid.Cid{cids[0]}, []cid.Cid{}, []cid.Cid{}) + // peerB: HAVE cid1 + bpm.ReceiveFrom(peerB, []cid.Cid{cids[0]}, []cid.Cid{}) + // Note: this also registers peer B as being available + spm.Update(peerB, []cid.Cid{}, []cid.Cid{cids[0]}, []cid.Cid{}) + + time.Sleep(5 * time.Millisecond) + + // peerA and peerB: become unavailable + spm.SignalAvailability(peerA, false) + spm.SignalAvailability(peerB, false) + + time.Sleep(5 * time.Millisecond) + + // Expect that onPeersExhausted() will be called with all cids for blocks + // that have not been received + if !testutil.MatchKeysIgnoreOrder(ep.exhausted(), []cid.Cid{cids[1], cids[2]}) { + t.Fatal("Wrong keys") + } +} + +func TestConsecutiveDontHaveLimit(t *testing.T) { + cids := testutil.GenerateCids(peerDontHaveLimit + 10) + p := testutil.GeneratePeers(1)[0] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // Add all cids as wants + spm.Add(cids) + + // Receive a block from peer (adds it to the session) + spm.Update(p, cids[:1], []cid.Cid{}, []cid.Cid{}) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVEs from peer that do not exceed limit + for _, c := range cids[1:peerDontHaveLimit] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + + // Wait for processing to 
complete + time.Sleep(20 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVEs from peer that exceed limit + for _, c := range cids[peerDontHaveLimit:] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + + // Wait for processing to complete + time.Sleep(20 * time.Millisecond) + + // Session should remove peer + if has := fpm.HasPeer(p); has { + t.Fatal("Expected peer not to be available") + } +} + +func TestConsecutiveDontHaveLimitInterrupted(t *testing.T) { + cids := testutil.GenerateCids(peerDontHaveLimit + 10) + p := testutil.GeneratePeers(1)[0] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // Add all cids as wants + spm.Add(cids) + + // Receive a block from peer (adds it to the session) + spm.Update(p, cids[:1], []cid.Cid{}, []cid.Cid{}) + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVE then HAVE then DONT_HAVE from peer, + // where consecutive DONT_HAVEs would have exceeded limit + // (but they are not consecutive) + for _, c := range cids[1:peerDontHaveLimit] { + // DONT_HAVEs + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + for _, c := range cids[peerDontHaveLimit : peerDontHaveLimit+1] { + // HAVEs + bpm.ReceiveFrom(p, []cid.Cid{c}, []cid.Cid{}) + spm.Update(p, []cid.Cid{}, []cid.Cid{c}, []cid.Cid{}) + } + for _, c := range cids[peerDontHaveLimit+1:] { + // DONT_HAVEs + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } +} + +func TestConsecutiveDontHaveReinstateAfterRemoval(t *testing.T) { + cids := testutil.GenerateCids(peerDontHaveLimit + 10) + p := testutil.GeneratePeers(1)[0] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // Add all cids as wants + spm.Add(cids) + + // Receive a block from peer (adds it to the session) + spm.Update(p, cids[:1], []cid.Cid{}, []cid.Cid{}) + + // Wait for processing to complete + time.Sleep(5 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVEs from peer that exceed limit + for _, c := range cids[1 : peerDontHaveLimit+2] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Session should remove peer + if has := fpm.HasPeer(p); has { + t.Fatal("Expected peer not to be available") + } + + 
// Receive a HAVE from peer (adds it back into the session) + bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) + spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } + + cids2 := testutil.GenerateCids(peerDontHaveLimit + 10) + + // Receive DONT_HAVEs from peer that don't exceed limit + for _, c := range cids2[1:peerDontHaveLimit] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVEs from peer that exceed limit + for _, c := range cids2[peerDontHaveLimit:] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Session should remove peer + if has := fpm.HasPeer(p); has { + t.Fatal("Expected peer not to be available") + } +} + +func TestConsecutiveDontHaveDontRemoveIfHasWantedBlock(t *testing.T) { + cids := testutil.GenerateCids(peerDontHaveLimit + 10) + p := testutil.GeneratePeers(1)[0] + sid := uint64(1) + pm := newMockPeerManager() + fpm := newFakeSessionPeerManager() + swc := newMockSessionMgr() + bpm := bsbpm.New() + onSend := func(peer.ID, []cid.Cid, []cid.Cid) {} + onPeersExhausted := func([]cid.Cid) {} + spm := newSessionWantSender(sid, pm, fpm, swc, bpm, onSend, onPeersExhausted) + defer spm.Shutdown() + + go spm.Run() + + // Add all cids as wants + spm.Add(cids) + + // Receive a HAVE from peer (adds it to the session) + bpm.ReceiveFrom(p, cids[:1], []cid.Cid{}) + spm.Update(p, []cid.Cid{}, cids[:1], []cid.Cid{}) + + // Wait for processing to complete + time.Sleep(10 * time.Millisecond) + + // Peer should be available + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } + + // Receive DONT_HAVEs from peer that exceed limit + for _, c := range cids[1 : peerDontHaveLimit+5] { + bpm.ReceiveFrom(p, []cid.Cid{}, []cid.Cid{c}) + spm.Update(p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{c}) + } + + // Wait for processing to complete + time.Sleep(20 * time.Millisecond) + + // Peer should still be available because it has a block that we want. 
+ // (We received a HAVE for cid 0 but didn't yet receive the block) + if has := fpm.HasPeer(p); !has { + t.Fatal("Expected peer to be available") + } +} diff --git a/internal/session/wantinfo_test.go b/internal/session/wantinfo_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a410be867083611cef1893fc5b984b6262495342 --- /dev/null +++ b/internal/session/wantinfo_test.go @@ -0,0 +1,80 @@ +package session + +import ( + "testing" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" +) + +func TestEmptyWantInfo(t *testing.T) { + wp := newWantInfo(newPeerResponseTracker()) + + if wp.bestPeer != "" { + t.Fatal("expected no best peer") + } +} + +func TestSetPeerBlockPresence(t *testing.T) { + peers := testutil.GeneratePeers(2) + wp := newWantInfo(newPeerResponseTracker()) + + wp.setPeerBlockPresence(peers[0], BPUnknown) + if wp.bestPeer != peers[0] { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[1], BPHave) + if wp.bestPeer != peers[1] { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[0], BPDontHave) + if wp.bestPeer != peers[1] { + t.Fatal("wrong best peer") + } +} + +func TestSetPeerBlockPresenceBestLower(t *testing.T) { + peers := testutil.GeneratePeers(2) + wp := newWantInfo(newPeerResponseTracker()) + + wp.setPeerBlockPresence(peers[0], BPHave) + if wp.bestPeer != peers[0] { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[1], BPUnknown) + if wp.bestPeer != peers[0] { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[0], BPDontHave) + if wp.bestPeer != peers[1] { + t.Fatal("wrong best peer") + } +} + +func TestRemoveThenSetDontHave(t *testing.T) { + peers := testutil.GeneratePeers(2) + wp := newWantInfo(newPeerResponseTracker()) + + wp.setPeerBlockPresence(peers[0], BPUnknown) + if wp.bestPeer != peers[0] { + t.Fatal("wrong best peer") + } + + wp.removePeer(peers[0]) + if wp.bestPeer != "" { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[1], BPUnknown) + if wp.bestPeer != peers[1] { + t.Fatal("wrong best peer") + } + + wp.setPeerBlockPresence(peers[0], BPDontHave) + if wp.bestPeer != peers[1] { + t.Fatal("wrong best peer") + } +} diff --git a/internal/sessioninterestmanager/sessioninterestmanager.go b/internal/sessioninterestmanager/sessioninterestmanager.go new file mode 100644 index 0000000000000000000000000000000000000000..0a9f2e4f9a88c371c616187b3a49ef798b2fd445 --- /dev/null +++ b/internal/sessioninterestmanager/sessioninterestmanager.go @@ -0,0 +1,201 @@ +package sessioninterestmanager + +import ( + "sync" + + blocks "gitlab.dms3.io/dms3/go-block-format" + + cid "gitlab.dms3.io/dms3/go-cid" +) + +// SessionInterestManager records the CIDs that each session is interested in. +type SessionInterestManager struct { + lk sync.RWMutex + wants map[cid.Cid]map[uint64]bool +} + +// New initializes a new SessionInterestManager. +func New() *SessionInterestManager { + return &SessionInterestManager{ + // Map of cids -> sessions -> bool + // + // The boolean indicates whether the session still wants the block + // or is just interested in receiving messages about it. + // + // Note that once the block is received the session no longer wants + // the block, but still wants to receive messages from peers who have + // the block as they may have other blocks the session is interested in. + wants: make(map[cid.Cid]map[uint64]bool), + } +} + +// When the client asks the session for blocks, the session calls +// RecordSessionInterest() with those cids. 
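// A usage sketch of the lifecycle described above (illustrative only; the
// session ID and key slice are assumptions, not part of this patch): a session
// records interest when the client asks for blocks, filters incoming keys down
// to the ones it cares about, marks keys as no longer wanted once their blocks
// arrive, and removes itself on shutdown.
func exampleSessionInterestLifecycle(sim *SessionInterestManager, ses uint64, ks []cid.Cid) {
	// Session starts out wanting all the keys
	sim.RecordSessionInterest(ses, ks)

	// An incoming message mentions some keys; keep only those this session is
	// interested in (FilterSessionInterested returns one result slice per input set)
	interested := sim.FilterSessionInterested(ses, ks)[0]

	// Blocks for those keys arrive: the session no longer wants them, but is
	// still interested in messages about them
	sim.RemoveSessionWants(ses, interested)

	// On shutdown, drop the session; the returned keys are wanted by no
	// remaining session and can be cancelled with the network
	_ = sim.RemoveSession(ses)
}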
+func (sim *SessionInterestManager) RecordSessionInterest(ses uint64, ks []cid.Cid) { + sim.lk.Lock() + defer sim.lk.Unlock() + + // For each key + for _, c := range ks { + // Record that the session wants the blocks + if want, ok := sim.wants[c]; ok { + want[ses] = true + } else { + sim.wants[c] = map[uint64]bool{ses: true} + } + } +} + +// When the session shuts down it calls RemoveSessionInterest(). +// Returns the keys that no session is interested in any more. +func (sim *SessionInterestManager) RemoveSession(ses uint64) []cid.Cid { + sim.lk.Lock() + defer sim.lk.Unlock() + + // The keys that no session is interested in + deletedKs := make([]cid.Cid, 0) + + // For each known key + for c := range sim.wants { + // Remove the session from the list of sessions that want the key + delete(sim.wants[c], ses) + + // If there are no more sessions that want the key + if len(sim.wants[c]) == 0 { + // Clean up the list memory + delete(sim.wants, c) + // Add the key to the list of keys that no session is interested in + deletedKs = append(deletedKs, c) + } + } + + return deletedKs +} + +// When the session receives blocks, it calls RemoveSessionWants(). +func (sim *SessionInterestManager) RemoveSessionWants(ses uint64, ks []cid.Cid) { + sim.lk.Lock() + defer sim.lk.Unlock() + + // For each key + for _, c := range ks { + // If the session wanted the block + if wanted, ok := sim.wants[c][ses]; ok && wanted { + // Mark the block as unwanted + sim.wants[c][ses] = false + } + } +} + +// When a request is cancelled, the session calls RemoveSessionInterested(). +// Returns the keys that no session is interested in any more. +func (sim *SessionInterestManager) RemoveSessionInterested(ses uint64, ks []cid.Cid) []cid.Cid { + sim.lk.Lock() + defer sim.lk.Unlock() + + // The keys that no session is interested in + deletedKs := make([]cid.Cid, 0, len(ks)) + + // For each key + for _, c := range ks { + // If there is a list of sessions that want the key + if _, ok := sim.wants[c]; ok { + // Remove the session from the list of sessions that want the key + delete(sim.wants[c], ses) + + // If there are no more sessions that want the key + if len(sim.wants[c]) == 0 { + // Clean up the list memory + delete(sim.wants, c) + // Add the key to the list of keys that no session is interested in + deletedKs = append(deletedKs, c) + } + } + } + + return deletedKs +} + +// The session calls FilterSessionInterested() to filter the sets of keys for +// those that the session is interested in +func (sim *SessionInterestManager) FilterSessionInterested(ses uint64, ksets ...[]cid.Cid) [][]cid.Cid { + sim.lk.RLock() + defer sim.lk.RUnlock() + + // For each set of keys + kres := make([][]cid.Cid, len(ksets)) + for i, ks := range ksets { + // The set of keys that at least one session is interested in + has := make([]cid.Cid, 0, len(ks)) + + // For each key in the list + for _, c := range ks { + // If there is a session that's interested, add the key to the set + if _, ok := sim.wants[c][ses]; ok { + has = append(has, c) + } + } + kres[i] = has + } + return kres +} + +// When bitswap receives blocks it calls SplitWantedUnwanted() to discard +// unwanted blocks +func (sim *SessionInterestManager) SplitWantedUnwanted(blks []blocks.Block) ([]blocks.Block, []blocks.Block) { + sim.lk.RLock() + defer sim.lk.RUnlock() + + // Get the wanted block keys as a set + wantedKs := cid.NewSet() + for _, b := range blks { + c := b.Cid() + // For each session that is interested in the key + for ses := range sim.wants[c] { + // If the session wants the 
key (rather than just being interested) + if wanted, ok := sim.wants[c][ses]; ok && wanted { + // Add the key to the set + wantedKs.Add(c) + } + } + } + + // Separate the blocks into wanted and unwanted + wantedBlks := make([]blocks.Block, 0, len(blks)) + notWantedBlks := make([]blocks.Block, 0) + for _, b := range blks { + if wantedKs.Has(b.Cid()) { + wantedBlks = append(wantedBlks, b) + } else { + notWantedBlks = append(notWantedBlks, b) + } + } + return wantedBlks, notWantedBlks +} + +// When the SessionManager receives a message it calls InterestedSessions() to +// find out which sessions are interested in the message. +func (sim *SessionInterestManager) InterestedSessions(blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []uint64 { + sim.lk.RLock() + defer sim.lk.RUnlock() + + ks := make([]cid.Cid, 0, len(blks)+len(haves)+len(dontHaves)) + ks = append(ks, blks...) + ks = append(ks, haves...) + ks = append(ks, dontHaves...) + + // Create a set of sessions that are interested in the keys + sesSet := make(map[uint64]struct{}) + for _, c := range ks { + for s := range sim.wants[c] { + sesSet[s] = struct{}{} + } + } + + // Convert the set into a list + ses := make([]uint64, 0, len(sesSet)) + for s := range sesSet { + ses = append(ses, s) + } + return ses +} diff --git a/internal/sessioninterestmanager/sessioninterestmanager_test.go b/internal/sessioninterestmanager/sessioninterestmanager_test.go new file mode 100644 index 0000000000000000000000000000000000000000..03f308f00608342585020bcef48fd57546d99304 --- /dev/null +++ b/internal/sessioninterestmanager/sessioninterestmanager_test.go @@ -0,0 +1,218 @@ +package sessioninterestmanager + +import ( + "testing" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + cid "gitlab.dms3.io/dms3/go-cid" +) + +func TestEmpty(t *testing.T) { + sim := New() + + ses := uint64(1) + cids := testutil.GenerateCids(2) + res := sim.FilterSessionInterested(ses, cids) + if len(res) != 1 || len(res[0]) > 0 { + t.Fatal("Expected no interest") + } + if len(sim.InterestedSessions(cids, []cid.Cid{}, []cid.Cid{})) > 0 { + t.Fatal("Expected no interest") + } +} + +func TestBasic(t *testing.T) { + sim := New() + + ses1 := uint64(1) + ses2 := uint64(2) + cids1 := testutil.GenerateCids(2) + cids2 := append(testutil.GenerateCids(1), cids1[1]) + sim.RecordSessionInterest(ses1, cids1) + + res := sim.FilterSessionInterested(ses1, cids1) + if len(res) != 1 || len(res[0]) != 2 { + t.Fatal("Expected 2 keys") + } + if len(sim.InterestedSessions(cids1, []cid.Cid{}, []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + + sim.RecordSessionInterest(ses2, cids2) + res = sim.FilterSessionInterested(ses2, cids1[:1]) + if len(res) != 1 || len(res[0]) != 0 { + t.Fatal("Expected no interest") + } + res = sim.FilterSessionInterested(ses2, cids2) + if len(res) != 1 || len(res[0]) != 2 { + t.Fatal("Expected 2 keys") + } + + if len(sim.InterestedSessions(cids1[:1], []cid.Cid{}, []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions(cids1[1:], []cid.Cid{}, []cid.Cid{})) != 2 { + t.Fatal("Expected 2 sessions") + } +} + +func TestInterestedSessions(t *testing.T) { + sim := New() + + ses := uint64(1) + cids := testutil.GenerateCids(3) + sim.RecordSessionInterest(ses, cids[0:2]) + + if len(sim.InterestedSessions(cids, []cid.Cid{}, []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions(cids[0:1], []cid.Cid{}, []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions([]cid.Cid{}, cids, 
[]cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions([]cid.Cid{}, cids[0:1], []cid.Cid{})) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions([]cid.Cid{}, []cid.Cid{}, cids)) != 1 { + t.Fatal("Expected 1 session") + } + if len(sim.InterestedSessions([]cid.Cid{}, []cid.Cid{}, cids[0:1])) != 1 { + t.Fatal("Expected 1 session") + } +} + +func TestRemoveSession(t *testing.T) { + sim := New() + + ses1 := uint64(1) + ses2 := uint64(2) + cids1 := testutil.GenerateCids(2) + cids2 := append(testutil.GenerateCids(1), cids1[1]) + sim.RecordSessionInterest(ses1, cids1) + sim.RecordSessionInterest(ses2, cids2) + sim.RemoveSession(ses1) + + res := sim.FilterSessionInterested(ses1, cids1) + if len(res) != 1 || len(res[0]) != 0 { + t.Fatal("Expected no interest") + } + + res = sim.FilterSessionInterested(ses2, cids1, cids2) + if len(res) != 2 { + t.Fatal("unexpected results size") + } + if len(res[0]) != 1 { + t.Fatal("Expected 1 key") + } + if len(res[1]) != 2 { + t.Fatal("Expected 2 keys") + } +} + +func TestRemoveSessionInterested(t *testing.T) { + sim := New() + + ses1 := uint64(1) + ses2 := uint64(2) + cids1 := testutil.GenerateCids(2) + cids2 := append(testutil.GenerateCids(1), cids1[1]) + sim.RecordSessionInterest(ses1, cids1) + sim.RecordSessionInterest(ses2, cids2) + + res := sim.RemoveSessionInterested(ses1, []cid.Cid{cids1[0]}) + if len(res) != 1 { + t.Fatal("Expected no interested sessions left") + } + + interested := sim.FilterSessionInterested(ses1, cids1) + if len(interested) != 1 || len(interested[0]) != 1 { + t.Fatal("Expected ses1 still interested in one cid") + } + + res = sim.RemoveSessionInterested(ses1, cids1) + if len(res) != 0 { + t.Fatal("Expected ses2 to be interested in one cid") + } + + interested = sim.FilterSessionInterested(ses1, cids1) + if len(interested) != 1 || len(interested[0]) != 0 { + t.Fatal("Expected ses1 to have no remaining interest") + } + + interested = sim.FilterSessionInterested(ses2, cids1) + if len(interested) != 1 || len(interested[0]) != 1 { + t.Fatal("Expected ses2 to still be interested in one key") + } +} + +func TestSplitWantedUnwanted(t *testing.T) { + blks := testutil.GenerateBlocksOfSize(3, 1024) + sim := New() + ses1 := uint64(1) + ses2 := uint64(2) + + var cids []cid.Cid + for _, b := range blks { + cids = append(cids, b.Cid()) + } + + // ses1: + // ses2: + wanted, unwanted := sim.SplitWantedUnwanted(blks) + if len(wanted) > 0 { + t.Fatal("Expected no blocks") + } + if len(unwanted) != 3 { + t.Fatal("Expected 3 blocks") + } + + // ses1: 0 1 + // ses2: + sim.RecordSessionInterest(ses1, cids[0:2]) + wanted, unwanted = sim.SplitWantedUnwanted(blks) + if len(wanted) != 2 { + t.Fatal("Expected 2 blocks") + } + if len(unwanted) != 1 { + t.Fatal("Expected 1 block") + } + + // ses1: 1 + // ses2: 1 2 + sim.RecordSessionInterest(ses2, cids[1:]) + sim.RemoveSessionWants(ses1, cids[:1]) + + wanted, unwanted = sim.SplitWantedUnwanted(blks) + if len(wanted) != 2 { + t.Fatal("Expected 2 blocks") + } + if len(unwanted) != 1 { + t.Fatal("Expected no blocks") + } + + // ses1: + // ses2: 1 2 + sim.RemoveSessionWants(ses1, cids[1:2]) + + wanted, unwanted = sim.SplitWantedUnwanted(blks) + if len(wanted) != 2 { + t.Fatal("Expected 2 blocks") + } + if len(unwanted) != 1 { + t.Fatal("Expected no blocks") + } + + // ses1: + // ses2: 2 + sim.RemoveSessionWants(ses2, cids[1:2]) + + wanted, unwanted = sim.SplitWantedUnwanted(blks) + if len(wanted) != 1 { + t.Fatal("Expected 2 blocks") + } + if len(unwanted) != 2 { 
+ t.Fatal("Expected 2 blocks") + } +} diff --git a/internal/sessionmanager/sessionmanager.go b/internal/sessionmanager/sessionmanager.go new file mode 100644 index 0000000000000000000000000000000000000000..e20efcd36cf2f4a5b2ba79a43cd4921379ef878a --- /dev/null +++ b/internal/sessionmanager/sessionmanager.go @@ -0,0 +1,189 @@ +package sessionmanager + +import ( + "context" + "sync" + "time" + + cid "gitlab.dms3.io/dms3/go-cid" + delay "gitlab.dms3.io/dms3/go-dms3-delay" + + bsbpm "gitlab.dms3.io/dms3/go-bitswap/internal/blockpresencemanager" + notifications "gitlab.dms3.io/dms3/go-bitswap/internal/notifications" + bssession "gitlab.dms3.io/dms3/go-bitswap/internal/session" + bssim "gitlab.dms3.io/dms3/go-bitswap/internal/sessioninterestmanager" + exchange "gitlab.dms3.io/dms3/go-dms3-exchange-interface" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +// Session is a session that is managed by the session manager +type Session interface { + exchange.Fetcher + ID() uint64 + ReceiveFrom(peer.ID, []cid.Cid, []cid.Cid, []cid.Cid) + Shutdown() +} + +// SessionFactory generates a new session for the SessionManager to track. +type SessionFactory func( + ctx context.Context, + sm bssession.SessionManager, + id uint64, + sprm bssession.SessionPeerManager, + sim *bssim.SessionInterestManager, + pm bssession.PeerManager, + bpm *bsbpm.BlockPresenceManager, + notif notifications.PubSub, + provSearchDelay time.Duration, + rebroadcastDelay delay.D, + self peer.ID) Session + +// PeerManagerFactory generates a new peer manager for a session. +type PeerManagerFactory func(ctx context.Context, id uint64) bssession.SessionPeerManager + +// SessionManager is responsible for creating, managing, and dispatching to +// sessions. +type SessionManager struct { + ctx context.Context + sessionFactory SessionFactory + sessionInterestManager *bssim.SessionInterestManager + peerManagerFactory PeerManagerFactory + blockPresenceManager *bsbpm.BlockPresenceManager + peerManager bssession.PeerManager + notif notifications.PubSub + + // Sessions + sessLk sync.RWMutex + sessions map[uint64]Session + + // Session Index + sessIDLk sync.Mutex + sessID uint64 + + self peer.ID +} + +// New creates a new SessionManager. +func New(ctx context.Context, sessionFactory SessionFactory, sessionInterestManager *bssim.SessionInterestManager, peerManagerFactory PeerManagerFactory, + blockPresenceManager *bsbpm.BlockPresenceManager, peerManager bssession.PeerManager, notif notifications.PubSub, self peer.ID) *SessionManager { + + return &SessionManager{ + ctx: ctx, + sessionFactory: sessionFactory, + sessionInterestManager: sessionInterestManager, + peerManagerFactory: peerManagerFactory, + blockPresenceManager: blockPresenceManager, + peerManager: peerManager, + notif: notif, + sessions: make(map[uint64]Session), + self: self, + } +} + +// NewSession initializes a session with the given context, and adds to the +// session manager. 
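// A caller-side sketch for the NewSession method defined below (the helper
// name and the delay values are illustrative assumptions, not part of this
// patch): the returned exchange.Fetcher is all the caller needs to request
// blocks, and the session is cleaned up when ctx is cancelled.
func exampleNewSession(ctx context.Context, sm *SessionManager, ks []cid.Cid) error {
	fetcher := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute))

	// GetBlocks delivers blocks on the channel until ctx is cancelled or all
	// requested blocks have been received
	blkCh, err := fetcher.GetBlocks(ctx, ks)
	if err != nil {
		return err
	}
	for range blkCh {
		// a real caller would consume the blocks here
	}
	return nil
}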
+func (sm *SessionManager) NewSession(ctx context.Context, + provSearchDelay time.Duration, + rebroadcastDelay delay.D) exchange.Fetcher { + id := sm.GetNextSessionID() + + pm := sm.peerManagerFactory(ctx, id) + session := sm.sessionFactory(ctx, sm, id, pm, sm.sessionInterestManager, sm.peerManager, sm.blockPresenceManager, sm.notif, provSearchDelay, rebroadcastDelay, sm.self) + + sm.sessLk.Lock() + if sm.sessions != nil { // check if SessionManager was shutdown + sm.sessions[id] = session + } + sm.sessLk.Unlock() + + return session +} + +func (sm *SessionManager) Shutdown() { + sm.sessLk.Lock() + + sessions := make([]Session, 0, len(sm.sessions)) + for _, ses := range sm.sessions { + sessions = append(sessions, ses) + } + + // Ensure that if Shutdown() is called twice we only shut down + // the sessions once + sm.sessions = nil + + sm.sessLk.Unlock() + + for _, ses := range sessions { + ses.Shutdown() + } +} + +func (sm *SessionManager) RemoveSession(sesid uint64) { + // Remove session from SessionInterestManager - returns the keys that no + // session is interested in anymore. + cancelKs := sm.sessionInterestManager.RemoveSession(sesid) + + // Cancel keys that no session is interested in anymore + sm.cancelWants(cancelKs) + + sm.sessLk.Lock() + defer sm.sessLk.Unlock() + + // Clean up session + if sm.sessions != nil { // check if SessionManager was shutdown + delete(sm.sessions, sesid) + } +} + +// GetNextSessionID returns the next sequential identifier for a session. +func (sm *SessionManager) GetNextSessionID() uint64 { + sm.sessIDLk.Lock() + defer sm.sessIDLk.Unlock() + + sm.sessID++ + return sm.sessID +} + +// ReceiveFrom is called when a new message is received +func (sm *SessionManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { + // Record block presence for HAVE / DONT_HAVE + sm.blockPresenceManager.ReceiveFrom(p, haves, dontHaves) + + // Notify each session that is interested in the blocks / HAVEs / DONT_HAVEs + for _, id := range sm.sessionInterestManager.InterestedSessions(blks, haves, dontHaves) { + sm.sessLk.RLock() + if sm.sessions == nil { // check if SessionManager was shutdown + sm.sessLk.RUnlock() + return + } + sess, ok := sm.sessions[id] + sm.sessLk.RUnlock() + + if ok { + sess.ReceiveFrom(p, blks, haves, dontHaves) + } + } + + // Send CANCEL to all peers with want-have / want-block + sm.peerManager.SendCancels(ctx, blks) +} + +// CancelSessionWants is called when a session cancels wants because a call to +// GetBlocks() is cancelled +func (sm *SessionManager) CancelSessionWants(sesid uint64, wants []cid.Cid) { + // Remove session's interest in the given blocks - returns the keys that no + // session is interested in anymore. + cancelKs := sm.sessionInterestManager.RemoveSessionInterested(sesid, wants) + sm.cancelWants(cancelKs) +} + +func (sm *SessionManager) cancelWants(wants []cid.Cid) { + // Free up block presence tracking for keys that no session is interested + // in anymore + sm.blockPresenceManager.RemoveKeys(wants) + + // Send CANCEL to all peers for blocks that no session is interested in + // anymore. + // Note: use bitswap context because session context may already be Done. 
+ sm.peerManager.SendCancels(sm.ctx, wants) +} diff --git a/internal/sessionmanager/sessionmanager_test.go b/internal/sessionmanager/sessionmanager_test.go new file mode 100644 index 0000000000000000000000000000000000000000..353bd6d681b9863341f77823444116e8ada99a7d --- /dev/null +++ b/internal/sessionmanager/sessionmanager_test.go @@ -0,0 +1,261 @@ +package sessionmanager + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + delay "gitlab.dms3.io/dms3/go-dms3-delay" + + bsbpm "gitlab.dms3.io/dms3/go-bitswap/internal/blockpresencemanager" + notifications "gitlab.dms3.io/dms3/go-bitswap/internal/notifications" + bspm "gitlab.dms3.io/dms3/go-bitswap/internal/peermanager" + bssession "gitlab.dms3.io/dms3/go-bitswap/internal/session" + bssim "gitlab.dms3.io/dms3/go-bitswap/internal/sessioninterestmanager" + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + + blocks "gitlab.dms3.io/dms3/go-block-format" + cid "gitlab.dms3.io/dms3/go-cid" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +type fakeSession struct { + ks []cid.Cid + wantBlocks []cid.Cid + wantHaves []cid.Cid + id uint64 + pm *fakeSesPeerManager + sm bssession.SessionManager + notif notifications.PubSub +} + +func (*fakeSession) GetBlock(context.Context, cid.Cid) (blocks.Block, error) { + return nil, nil +} +func (*fakeSession) GetBlocks(context.Context, []cid.Cid) (<-chan blocks.Block, error) { + return nil, nil +} +func (fs *fakeSession) ID() uint64 { + return fs.id +} +func (fs *fakeSession) ReceiveFrom(p peer.ID, ks []cid.Cid, wantBlocks []cid.Cid, wantHaves []cid.Cid) { + fs.ks = append(fs.ks, ks...) + fs.wantBlocks = append(fs.wantBlocks, wantBlocks...) + fs.wantHaves = append(fs.wantHaves, wantHaves...) +} +func (fs *fakeSession) Shutdown() { + fs.sm.RemoveSession(fs.id) +} + +type fakeSesPeerManager struct { +} + +func (*fakeSesPeerManager) Peers() []peer.ID { return nil } +func (*fakeSesPeerManager) PeersDiscovered() bool { return false } +func (*fakeSesPeerManager) Shutdown() {} +func (*fakeSesPeerManager) AddPeer(peer.ID) bool { return false } +func (*fakeSesPeerManager) RemovePeer(peer.ID) bool { return false } +func (*fakeSesPeerManager) HasPeers() bool { return false } +func (*fakeSesPeerManager) ProtectConnection(peer.ID) {} + +type fakePeerManager struct { + lk sync.Mutex + cancels []cid.Cid +} + +func (*fakePeerManager) RegisterSession(peer.ID, bspm.Session) {} +func (*fakePeerManager) UnregisterSession(uint64) {} +func (*fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} +func (*fakePeerManager) BroadcastWantHaves(context.Context, []cid.Cid) {} +func (fpm *fakePeerManager) SendCancels(ctx context.Context, cancels []cid.Cid) { + fpm.lk.Lock() + defer fpm.lk.Unlock() + fpm.cancels = append(fpm.cancels, cancels...) 
+} +func (fpm *fakePeerManager) cancelled() []cid.Cid { + fpm.lk.Lock() + defer fpm.lk.Unlock() + return fpm.cancels +} + +func sessionFactory(ctx context.Context, + sm bssession.SessionManager, + id uint64, + sprm bssession.SessionPeerManager, + sim *bssim.SessionInterestManager, + pm bssession.PeerManager, + bpm *bsbpm.BlockPresenceManager, + notif notifications.PubSub, + provSearchDelay time.Duration, + rebroadcastDelay delay.D, + self peer.ID) Session { + fs := &fakeSession{ + id: id, + pm: sprm.(*fakeSesPeerManager), + sm: sm, + notif: notif, + } + go func() { + <-ctx.Done() + sm.RemoveSession(fs.id) + }() + return fs +} + +func peerManagerFactory(ctx context.Context, id uint64) bssession.SessionPeerManager { + return &fakeSesPeerManager{} +} + +func TestReceiveFrom(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + notif := notifications.New() + defer notif.Shutdown() + sim := bssim.New() + bpm := bsbpm.New() + pm := &fakePeerManager{} + sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") + + p := peer.ID(fmt.Sprint(123)) + block := blocks.NewBlock([]byte("block")) + + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + + sim.RecordSessionInterest(firstSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) + + sm.ReceiveFrom(ctx, p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) + if len(firstSession.ks) == 0 || + len(secondSession.ks) > 0 || + len(thirdSession.ks) == 0 { + t.Fatal("should have received blocks but didn't") + } + + sm.ReceiveFrom(ctx, p, []cid.Cid{}, []cid.Cid{block.Cid()}, []cid.Cid{}) + if len(firstSession.wantBlocks) == 0 || + len(secondSession.wantBlocks) > 0 || + len(thirdSession.wantBlocks) == 0 { + t.Fatal("should have received want-blocks but didn't") + } + + sm.ReceiveFrom(ctx, p, []cid.Cid{}, []cid.Cid{}, []cid.Cid{block.Cid()}) + if len(firstSession.wantHaves) == 0 || + len(secondSession.wantHaves) > 0 || + len(thirdSession.wantHaves) == 0 { + t.Fatal("should have received want-haves but didn't") + } + + if len(pm.cancelled()) != 1 { + t.Fatal("should have sent cancel for received blocks") + } +} + +func TestReceiveBlocksWhenManagerShutdown(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + notif := notifications.New() + defer notif.Shutdown() + sim := bssim.New() + bpm := bsbpm.New() + pm := &fakePeerManager{} + sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") + + p := peer.ID(fmt.Sprint(123)) + block := blocks.NewBlock([]byte("block")) + + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + secondSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + + sim.RecordSessionInterest(firstSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(secondSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) + + sm.Shutdown() + + // wait for sessions to get removed + time.Sleep(10 * time.Millisecond) + + sm.ReceiveFrom(ctx, p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) + if len(firstSession.ks) > 0 || + 
len(secondSession.ks) > 0 || + len(thirdSession.ks) > 0 { + t.Fatal("received blocks for sessions after manager is shutdown") + } +} + +func TestReceiveBlocksWhenSessionContextCancelled(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + notif := notifications.New() + defer notif.Shutdown() + sim := bssim.New() + bpm := bsbpm.New() + pm := &fakePeerManager{} + sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") + + p := peer.ID(fmt.Sprint(123)) + block := blocks.NewBlock([]byte("block")) + + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + sessionCtx, sessionCancel := context.WithCancel(ctx) + secondSession := sm.NewSession(sessionCtx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + thirdSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + + sim.RecordSessionInterest(firstSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(secondSession.ID(), []cid.Cid{block.Cid()}) + sim.RecordSessionInterest(thirdSession.ID(), []cid.Cid{block.Cid()}) + + sessionCancel() + + // wait for sessions to get removed + time.Sleep(10 * time.Millisecond) + + sm.ReceiveFrom(ctx, p, []cid.Cid{block.Cid()}, []cid.Cid{}, []cid.Cid{}) + if len(firstSession.ks) == 0 || + len(secondSession.ks) > 0 || + len(thirdSession.ks) == 0 { + t.Fatal("received blocks for sessions that are canceled") + } +} + +func TestShutdown(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + notif := notifications.New() + defer notif.Shutdown() + sim := bssim.New() + bpm := bsbpm.New() + pm := &fakePeerManager{} + sm := New(ctx, sessionFactory, sim, peerManagerFactory, bpm, pm, notif, "") + + p := peer.ID(fmt.Sprint(123)) + block := blocks.NewBlock([]byte("block")) + cids := []cid.Cid{block.Cid()} + firstSession := sm.NewSession(ctx, time.Second, delay.Fixed(time.Minute)).(*fakeSession) + sim.RecordSessionInterest(firstSession.ID(), cids) + sm.ReceiveFrom(ctx, p, []cid.Cid{}, []cid.Cid{}, cids) + + if !bpm.HasKey(block.Cid()) { + t.Fatal("expected cid to be added to block presence manager") + } + + sm.Shutdown() + + // wait for cleanup + time.Sleep(10 * time.Millisecond) + + if bpm.HasKey(block.Cid()) { + t.Fatal("expected cid to be removed from block presence manager") + } + if !testutil.MatchKeysIgnoreOrder(pm.cancelled(), cids) { + t.Fatal("expected cancels to be sent") + } +} diff --git a/internal/sessionpeermanager/sessionpeermanager.go b/internal/sessionpeermanager/sessionpeermanager.go new file mode 100644 index 0000000000000000000000000000000000000000..e5187844c72e9fd749d0d602cc27407601645874 --- /dev/null +++ b/internal/sessionpeermanager/sessionpeermanager.go @@ -0,0 +1,150 @@ +package sessionpeermanager + +import ( + "fmt" + "sync" + + logging "gitlab.dms3.io/dms3/go-log" + + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +var log = logging.Logger("bs:sprmgr") + +const ( + // Connection Manager tag value for session peers. Indicates to connection + // manager that it should keep the connection to the peer. + sessionPeerTagValue = 5 +) + +// PeerTagger is an interface for tagging peers with metadata +type PeerTagger interface { + TagPeer(peer.ID, string, int) + UntagPeer(p peer.ID, tag string) + Protect(peer.ID, string) + Unprotect(peer.ID, string) bool +} + +// SessionPeerManager keeps track of peers for a session, and takes care of +// ConnectionManager tagging. 
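// A usage sketch for the SessionPeerManager defined below (the tagger, id and
// peer arguments are illustrative assumptions): peers are tagged with the
// connection manager as they join the session, the first useful peer can be
// protected, and Shutdown releases all tags again.
func exampleSessionPeers(tagger PeerTagger, id uint64, p peer.ID) {
	spm := New(id, tagger)

	if spm.AddPeer(p) {
		// First time this peer joins the session; keep its connection alive
		spm.ProtectConnection(p)
	}

	// ... the session runs, possibly adding and removing more peers ...

	// Untag and unprotect everything when the session ends
	spm.Shutdown()
}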
+type SessionPeerManager struct { + tagger PeerTagger + tag string + + id uint64 + plk sync.RWMutex + peers map[peer.ID]struct{} + peersDiscovered bool +} + +// New creates a new SessionPeerManager +func New(id uint64, tagger PeerTagger) *SessionPeerManager { + return &SessionPeerManager{ + id: id, + tag: fmt.Sprint("bs-ses-", id), + tagger: tagger, + peers: make(map[peer.ID]struct{}), + } +} + +// AddPeer adds the peer to the SessionPeerManager. +// Returns true if the peer is a new peer, false if it already existed. +func (spm *SessionPeerManager) AddPeer(p peer.ID) bool { + spm.plk.Lock() + defer spm.plk.Unlock() + + // Check if the peer is a new peer + if _, ok := spm.peers[p]; ok { + return false + } + + spm.peers[p] = struct{}{} + spm.peersDiscovered = true + + // Tag the peer with the ConnectionManager so it doesn't discard the + // connection + spm.tagger.TagPeer(p, spm.tag, sessionPeerTagValue) + + log.Debugw("Bitswap: Added peer to session", "session", spm.id, "peer", p, "peerCount", len(spm.peers)) + return true +} + +// Protect connection to this peer from being pruned by the connection manager +func (spm *SessionPeerManager) ProtectConnection(p peer.ID) { + spm.plk.Lock() + defer spm.plk.Unlock() + + if _, ok := spm.peers[p]; !ok { + return + } + + spm.tagger.Protect(p, spm.tag) +} + +// RemovePeer removes the peer from the SessionPeerManager. +// Returns true if the peer was removed, false if it did not exist. +func (spm *SessionPeerManager) RemovePeer(p peer.ID) bool { + spm.plk.Lock() + defer spm.plk.Unlock() + + if _, ok := spm.peers[p]; !ok { + return false + } + + delete(spm.peers, p) + spm.tagger.UntagPeer(p, spm.tag) + spm.tagger.Unprotect(p, spm.tag) + + log.Debugw("Bitswap: removed peer from session", "session", spm.id, "peer", p, "peerCount", len(spm.peers)) + return true +} + +// PeersDiscovered indicates whether peers have been discovered yet. +// Returns true once a peer has been discovered by the session (even if all +// peers are later removed from the session). 
+func (spm *SessionPeerManager) PeersDiscovered() bool { + spm.plk.RLock() + defer spm.plk.RUnlock() + + return spm.peersDiscovered +} + +func (spm *SessionPeerManager) Peers() []peer.ID { + spm.plk.RLock() + defer spm.plk.RUnlock() + + peers := make([]peer.ID, 0, len(spm.peers)) + for p := range spm.peers { + peers = append(peers, p) + } + + return peers +} + +func (spm *SessionPeerManager) HasPeers() bool { + spm.plk.RLock() + defer spm.plk.RUnlock() + + return len(spm.peers) > 0 +} + +func (spm *SessionPeerManager) HasPeer(p peer.ID) bool { + spm.plk.RLock() + defer spm.plk.RUnlock() + + _, ok := spm.peers[p] + return ok +} + +// Shutdown untags all the peers +func (spm *SessionPeerManager) Shutdown() { + spm.plk.Lock() + defer spm.plk.Unlock() + + // Untag the peers with the ConnectionManager so that it can release + // connections to those peers + for p := range spm.peers { + spm.tagger.UntagPeer(p, spm.tag) + spm.tagger.Unprotect(p, spm.tag) + } +} diff --git a/internal/sessionpeermanager/sessionpeermanager_test.go b/internal/sessionpeermanager/sessionpeermanager_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c20fdb3af75979d484702aca0a60c61ccc624d70 --- /dev/null +++ b/internal/sessionpeermanager/sessionpeermanager_test.go @@ -0,0 +1,302 @@ +package sessionpeermanager + +import ( + "sync" + "testing" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +type fakePeerTagger struct { + lk sync.Mutex + taggedPeers []peer.ID + protectedPeers map[peer.ID]map[string]struct{} + wait sync.WaitGroup +} + +func newFakePeerTagger() *fakePeerTagger { + return &fakePeerTagger{ + protectedPeers: make(map[peer.ID]map[string]struct{}), + } +} + +func (fpt *fakePeerTagger) TagPeer(p peer.ID, tag string, n int) { + fpt.wait.Add(1) + + fpt.lk.Lock() + defer fpt.lk.Unlock() + fpt.taggedPeers = append(fpt.taggedPeers, p) +} + +func (fpt *fakePeerTagger) UntagPeer(p peer.ID, tag string) { + defer fpt.wait.Done() + + fpt.lk.Lock() + defer fpt.lk.Unlock() + for i := 0; i < len(fpt.taggedPeers); i++ { + if fpt.taggedPeers[i] == p { + fpt.taggedPeers[i] = fpt.taggedPeers[len(fpt.taggedPeers)-1] + fpt.taggedPeers = fpt.taggedPeers[:len(fpt.taggedPeers)-1] + return + } + } +} + +func (fpt *fakePeerTagger) Protect(p peer.ID, tag string) { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + tags, ok := fpt.protectedPeers[p] + if !ok { + tags = make(map[string]struct{}) + fpt.protectedPeers[p] = tags + } + tags[tag] = struct{}{} +} + +func (fpt *fakePeerTagger) Unprotect(p peer.ID, tag string) bool { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + if tags, ok := fpt.protectedPeers[p]; ok { + delete(tags, tag) + if len(tags) == 0 { + delete(fpt.protectedPeers, p) + } + return len(tags) > 0 + } + + return false +} + +func (fpt *fakePeerTagger) isProtected(p peer.ID) bool { + fpt.lk.Lock() + defer fpt.lk.Unlock() + + return len(fpt.protectedPeers[p]) > 0 +} + +func TestAddPeers(t *testing.T) { + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) + + isNew := spm.AddPeer(peers[0]) + if !isNew { + t.Fatal("Expected peer to be new") + } + + isNew = spm.AddPeer(peers[0]) + if isNew { + t.Fatal("Expected peer to no longer be new") + } + + isNew = spm.AddPeer(peers[1]) + if !isNew { + t.Fatal("Expected peer to be new") + } +} + +func TestRemovePeers(t *testing.T) { + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) + + existed := spm.RemovePeer(peers[0]) + if existed { + t.Fatal("Expected peer not to exist") + } + 
+ spm.AddPeer(peers[0]) + spm.AddPeer(peers[1]) + + existed = spm.RemovePeer(peers[0]) + if !existed { + t.Fatal("Expected peer to exist") + } + existed = spm.RemovePeer(peers[1]) + if !existed { + t.Fatal("Expected peer to exist") + } + existed = spm.RemovePeer(peers[0]) + if existed { + t.Fatal("Expected peer not to have existed") + } +} + +func TestHasPeers(t *testing.T) { + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) + + if spm.HasPeers() { + t.Fatal("Expected not to have peers yet") + } + + spm.AddPeer(peers[0]) + if !spm.HasPeers() { + t.Fatal("Expected to have peers") + } + + spm.AddPeer(peers[1]) + if !spm.HasPeers() { + t.Fatal("Expected to have peers") + } + + spm.RemovePeer(peers[0]) + if !spm.HasPeers() { + t.Fatal("Expected to have peers") + } + + spm.RemovePeer(peers[1]) + if spm.HasPeers() { + t.Fatal("Expected to no longer have peers") + } +} + +func TestHasPeer(t *testing.T) { + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) + + if spm.HasPeer(peers[0]) { + t.Fatal("Expected not to have peer yet") + } + + spm.AddPeer(peers[0]) + if !spm.HasPeer(peers[0]) { + t.Fatal("Expected to have peer") + } + + spm.AddPeer(peers[1]) + if !spm.HasPeer(peers[1]) { + t.Fatal("Expected to have peer") + } + + spm.RemovePeer(peers[0]) + if spm.HasPeer(peers[0]) { + t.Fatal("Expected not to have peer") + } + + if !spm.HasPeer(peers[1]) { + t.Fatal("Expected to have peer") + } +} + +func TestPeers(t *testing.T) { + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) + + if len(spm.Peers()) > 0 { + t.Fatal("Expected not to have peers yet") + } + + spm.AddPeer(peers[0]) + if len(spm.Peers()) != 1 { + t.Fatal("Expected to have one peer") + } + + spm.AddPeer(peers[1]) + if len(spm.Peers()) != 2 { + t.Fatal("Expected to have two peers") + } + + spm.RemovePeer(peers[0]) + if len(spm.Peers()) != 1 { + t.Fatal("Expected to have one peer") + } +} + +func TestPeersDiscovered(t *testing.T) { + peers := testutil.GeneratePeers(2) + spm := New(1, &fakePeerTagger{}) + + if spm.PeersDiscovered() { + t.Fatal("Expected not to have discovered peers yet") + } + + spm.AddPeer(peers[0]) + if !spm.PeersDiscovered() { + t.Fatal("Expected to have discovered peers") + } + + spm.RemovePeer(peers[0]) + if !spm.PeersDiscovered() { + t.Fatal("Expected to still have discovered peers") + } +} + +func TestPeerTagging(t *testing.T) { + peers := testutil.GeneratePeers(2) + fpt := &fakePeerTagger{} + spm := New(1, fpt) + + spm.AddPeer(peers[0]) + if len(fpt.taggedPeers) != 1 { + t.Fatal("Expected to have tagged one peer") + } + + spm.AddPeer(peers[0]) + if len(fpt.taggedPeers) != 1 { + t.Fatal("Expected to have tagged one peer") + } + + spm.AddPeer(peers[1]) + if len(fpt.taggedPeers) != 2 { + t.Fatal("Expected to have tagged two peers") + } + + spm.RemovePeer(peers[1]) + if len(fpt.taggedPeers) != 1 { + t.Fatal("Expected to have untagged peer") + } +} + +func TestProtectConnection(t *testing.T) { + peers := testutil.GeneratePeers(1) + peerA := peers[0] + fpt := newFakePeerTagger() + spm := New(1, fpt) + + // Should not protect connection if peer hasn't been added yet + spm.ProtectConnection(peerA) + if fpt.isProtected(peerA) { + t.Fatal("Expected peer not to be protected") + } + + // Once peer is added, should be able to protect connection + spm.AddPeer(peerA) + spm.ProtectConnection(peerA) + if !fpt.isProtected(peerA) { + t.Fatal("Expected peer to be protected") + } + + // Removing peer should unprotect connection + spm.RemovePeer(peerA) + if 
fpt.isProtected(peerA) { + t.Fatal("Expected peer to be unprotected") + } +} + +func TestShutdown(t *testing.T) { + peers := testutil.GeneratePeers(2) + fpt := newFakePeerTagger() + spm := New(1, fpt) + + spm.AddPeer(peers[0]) + spm.AddPeer(peers[1]) + if len(fpt.taggedPeers) != 2 { + t.Fatal("Expected to have tagged two peers") + } + + spm.ProtectConnection(peers[0]) + if !fpt.isProtected(peers[0]) { + t.Fatal("Expected peer to be protected") + } + + spm.Shutdown() + + if len(fpt.taggedPeers) != 0 { + t.Fatal("Expected to have untagged all peers") + } + if len(fpt.protectedPeers) != 0 { + t.Fatal("Expected to have unprotected all peers") + } +} diff --git a/internal/testutil/testutil.go b/internal/testutil/testutil.go new file mode 100644 index 0000000000000000000000000000000000000000..12e7813ac184e930ca1f7657a308dae63e753458 --- /dev/null +++ b/internal/testutil/testutil.go @@ -0,0 +1,140 @@ +package testutil + +import ( + "fmt" + "math/rand" + + bsmsg "gitlab.dms3.io/dms3/go-bitswap/message" + "gitlab.dms3.io/dms3/go-bitswap/wantlist" + blocks "gitlab.dms3.io/dms3/go-block-format" + cid "gitlab.dms3.io/dms3/go-cid" + blocksutil "gitlab.dms3.io/dms3/go-dms3-blocksutil" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +var blockGenerator = blocksutil.NewBlockGenerator() +var prioritySeq int32 + +// GenerateBlocksOfSize generates a series of blocks of the given byte size +func GenerateBlocksOfSize(n int, size int64) []blocks.Block { + generatedBlocks := make([]blocks.Block, 0, n) + for i := 0; i < n; i++ { + // rand.Read never errors + buf := make([]byte, size) + rand.Read(buf) + b := blocks.NewBlock(buf) + generatedBlocks = append(generatedBlocks, b) + + } + return generatedBlocks +} + +// GenerateCids produces n content identifiers. +func GenerateCids(n int) []cid.Cid { + cids := make([]cid.Cid, 0, n) + for i := 0; i < n; i++ { + c := blockGenerator.Next().Cid() + cids = append(cids, c) + } + return cids +} + +// GenerateMessageEntries makes fake bitswap message entries. +func GenerateMessageEntries(n int, isCancel bool) []bsmsg.Entry { + bsmsgs := make([]bsmsg.Entry, 0, n) + for i := 0; i < n; i++ { + prioritySeq++ + msg := bsmsg.Entry{ + Entry: wantlist.NewRefEntry(blockGenerator.Next().Cid(), prioritySeq), + Cancel: isCancel, + } + bsmsgs = append(bsmsgs, msg) + } + return bsmsgs +} + +var peerSeq int + +// GeneratePeers creates n peer ids. +func GeneratePeers(n int) []peer.ID { + peerIds := make([]peer.ID, 0, n) + for i := 0; i < n; i++ { + peerSeq++ + p := peer.ID(fmt.Sprint(i)) + peerIds = append(peerIds, p) + } + return peerIds +} + +var nextSession uint64 + +// GenerateSessionID make a unit session identifier. +func GenerateSessionID() uint64 { + nextSession++ + return uint64(nextSession) +} + +// ContainsPeer returns true if a peer is found n a list of peers. +func ContainsPeer(peers []peer.ID, p peer.ID) bool { + for _, n := range peers { + if p == n { + return true + } + } + return false +} + +// IndexOf returns the index of a given cid in an array of blocks +func IndexOf(blks []blocks.Block, c cid.Cid) int { + for i, n := range blks { + if n.Cid() == c { + return i + } + } + return -1 +} + +// ContainsBlock returns true if a block is found n a list of blocks +func ContainsBlock(blks []blocks.Block, block blocks.Block) bool { + return IndexOf(blks, block.Cid()) != -1 +} + +// ContainsKey returns true if a key is found n a list of CIDs. 
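// A short sketch combining the helpers in this file (the values are
// illustrative): generate fixed sets of CIDs and peers, then use the
// order-insensitive matchers to assert on them in tests.
func exampleHelpers() bool {
	ks := GenerateCids(3)
	ps := GeneratePeers(2)

	// Order does not matter for the matchers
	sameKeys := MatchKeysIgnoreOrder(ks, []cid.Cid{ks[2], ks[0], ks[1]})
	hasPeer := ContainsPeer(ps, ps[1])

	return sameKeys && hasPeer
}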
+func ContainsKey(ks []cid.Cid, c cid.Cid) bool { + for _, k := range ks { + if c == k { + return true + } + } + return false +} + +// MatchKeysIgnoreOrder returns true if the lists of CIDs match (even if +// they're in a different order) +func MatchKeysIgnoreOrder(ks1 []cid.Cid, ks2 []cid.Cid) bool { + if len(ks1) != len(ks2) { + return false + } + + for _, k := range ks1 { + if !ContainsKey(ks2, k) { + return false + } + } + return true +} + +// MatchPeersIgnoreOrder returns true if the lists of peers match (even if +// they're in a different order) +func MatchPeersIgnoreOrder(ps1 []peer.ID, ps2 []peer.ID) bool { + if len(ps1) != len(ps2) { + return false + } + + for _, p := range ps1 { + if !ContainsPeer(ps2, p) { + return false + } + } + return true +} diff --git a/internal/testutil/testutil_test.go b/internal/testutil/testutil_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6f8ea505caae792e089d0c991b7f4ce7d365c34d --- /dev/null +++ b/internal/testutil/testutil_test.go @@ -0,0 +1,16 @@ +package testutil + +import ( + "testing" + + blocks "gitlab.dms3.io/dms3/go-block-format" +) + +func TestGenerateBlocksOfSize(t *testing.T) { + for _, b1 := range GenerateBlocksOfSize(10, 100) { + b2 := blocks.NewBlock(b1.RawData()) + if b2.Cid() != b1.Cid() { + t.Fatal("block CIDs mismatch") + } + } +} diff --git a/message/message.go b/message/message.go new file mode 100644 index 0000000000000000000000000000000000000000..88fbcad5aea88a21ca8dbef54687f5d0d04d6431 --- /dev/null +++ b/message/message.go @@ -0,0 +1,500 @@ +package message + +import ( + "encoding/binary" + "errors" + "io" + + pb "gitlab.dms3.io/dms3/go-bitswap/message/pb" + "gitlab.dms3.io/dms3/go-bitswap/wantlist" + + blocks "gitlab.dms3.io/dms3/go-block-format" + cid "gitlab.dms3.io/dms3/go-cid" + pool "gitlab.dms3.io/p2p/go-buffer-pool" + msgio "gitlab.dms3.io/p2p/go-msgio" + + u "gitlab.dms3.io/dms3/go-dms3-util" + "gitlab.dms3.io/p2p/go-p2p-core/network" +) + +// BitSwapMessage is the basic interface for interacting building, encoding, +// and decoding messages sent on the BitSwap protocol. +type BitSwapMessage interface { + // Wantlist returns a slice of unique keys that represent data wanted by + // the sender. + Wantlist() []Entry + + // Blocks returns a slice of unique blocks. + Blocks() []blocks.Block + // BlockPresences returns the list of HAVE / DONT_HAVE in the message + BlockPresences() []BlockPresence + // Haves returns the Cids for each HAVE + Haves() []cid.Cid + // DontHaves returns the Cids for each DONT_HAVE + DontHaves() []cid.Cid + // PendingBytes returns the number of outstanding bytes of data that the + // engine has yet to send to the client (because they didn't fit in this + // message) + PendingBytes() int32 + + // AddEntry adds an entry to the Wantlist. + AddEntry(key cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int + + // Cancel adds a CANCEL for the given CID to the message + // Returns the size of the CANCEL entry in the protobuf + Cancel(key cid.Cid) int + + // Remove removes any entries for the given CID. Useful when the want + // status for the CID changes when preparing a message. 
+ Remove(key cid.Cid) + + // Empty indicates whether the message has any information + Empty() bool + // Size returns the size of the message in bytes + Size() int + + // A full wantlist is an authoritative copy, a 'non-full' wantlist is a patch-set + Full() bool + + // AddBlock adds a block to the message + AddBlock(blocks.Block) + // AddBlockPresence adds a HAVE / DONT_HAVE for the given Cid to the message + AddBlockPresence(cid.Cid, pb.Message_BlockPresenceType) + // AddHave adds a HAVE for the given Cid to the message + AddHave(cid.Cid) + // AddDontHave adds a DONT_HAVE for the given Cid to the message + AddDontHave(cid.Cid) + // SetPendingBytes sets the number of bytes of data that are yet to be sent + // to the client (because they didn't fit in this message) + SetPendingBytes(int32) + Exportable + + Loggable() map[string]interface{} + + // Reset the values in the message back to defaults, so it can be reused + Reset(bool) + + // Clone the message fields + Clone() BitSwapMessage +} + +// Exportable is an interface for structures than can be +// encoded in a bitswap protobuf. +type Exportable interface { + // Note that older Bitswap versions use a different wire format, so we need + // to convert the message to the appropriate format depending on which + // version of the protocol the remote peer supports. + ToProtoV0() *pb.Message + ToProtoV1() *pb.Message + ToNetV0(w io.Writer) error + ToNetV1(w io.Writer) error +} + +// BlockPresence represents a HAVE / DONT_HAVE for a given Cid +type BlockPresence struct { + Cid cid.Cid + Type pb.Message_BlockPresenceType +} + +// Entry is a wantlist entry in a Bitswap message, with flags indicating +// - whether message is a cancel +// - whether requester wants a DONT_HAVE message +// - whether requester wants a HAVE message (instead of the block) +type Entry struct { + wantlist.Entry + Cancel bool + SendDontHave bool +} + +// Get the size of the entry on the wire +func (e *Entry) Size() int { + epb := e.ToPB() + return epb.Size() +} + +// Get the entry in protobuf form +func (e *Entry) ToPB() pb.Message_Wantlist_Entry { + return pb.Message_Wantlist_Entry{ + Block: pb.Cid{Cid: e.Cid}, + Priority: int32(e.Priority), + Cancel: e.Cancel, + WantType: e.WantType, + SendDontHave: e.SendDontHave, + } +} + +var MaxEntrySize = maxEntrySize() + +func maxEntrySize() int { + var maxInt32 int32 = (1 << 31) - 1 + + c := cid.NewCidV0(u.Hash([]byte("cid"))) + e := Entry{ + Entry: wantlist.Entry{ + Cid: c, + Priority: maxInt32, + WantType: pb.Message_Wantlist_Have, + }, + SendDontHave: true, // true takes up more space than false + Cancel: true, + } + return e.Size() +} + +type impl struct { + full bool + wantlist map[cid.Cid]*Entry + blocks map[cid.Cid]blocks.Block + blockPresences map[cid.Cid]pb.Message_BlockPresenceType + pendingBytes int32 +} + +// New returns a new, empty bitswap message +func New(full bool) BitSwapMessage { + return newMsg(full) +} + +func newMsg(full bool) *impl { + return &impl{ + full: full, + wantlist: make(map[cid.Cid]*Entry), + blocks: make(map[cid.Cid]blocks.Block), + blockPresences: make(map[cid.Cid]pb.Message_BlockPresenceType), + } +} + +// Clone the message fields +func (m *impl) Clone() BitSwapMessage { + msg := newMsg(m.full) + for k := range m.wantlist { + msg.wantlist[k] = m.wantlist[k] + } + for k := range m.blocks { + msg.blocks[k] = m.blocks[k] + } + for k := range m.blockPresences { + msg.blockPresences[k] = m.blockPresences[k] + } + msg.pendingBytes = m.pendingBytes + return msg +} + +// Reset the values in the message 
back to defaults, so it can be reused +func (m *impl) Reset(full bool) { + m.full = full + for k := range m.wantlist { + delete(m.wantlist, k) + } + for k := range m.blocks { + delete(m.blocks, k) + } + for k := range m.blockPresences { + delete(m.blockPresences, k) + } + m.pendingBytes = 0 +} + +var errCidMissing = errors.New("missing cid") + +func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { + m := newMsg(pbm.Wantlist.Full) + for _, e := range pbm.Wantlist.Entries { + if !e.Block.Cid.Defined() { + return nil, errCidMissing + } + m.addEntry(e.Block.Cid, e.Priority, e.Cancel, e.WantType, e.SendDontHave) + } + + // deprecated + for _, d := range pbm.Blocks { + // CIDv0, sha256, protobuf only + b := blocks.NewBlock(d) + m.AddBlock(b) + } + // + + for _, b := range pbm.GetPayload() { + pref, err := cid.PrefixFromBytes(b.GetPrefix()) + if err != nil { + return nil, err + } + + c, err := pref.Sum(b.GetData()) + if err != nil { + return nil, err + } + + blk, err := blocks.NewBlockWithCid(b.GetData(), c) + if err != nil { + return nil, err + } + + m.AddBlock(blk) + } + + for _, bi := range pbm.GetBlockPresences() { + if !bi.Cid.Cid.Defined() { + return nil, errCidMissing + } + m.AddBlockPresence(bi.Cid.Cid, bi.Type) + } + + m.pendingBytes = pbm.PendingBytes + + return m, nil +} + +func (m *impl) Full() bool { + return m.full +} + +func (m *impl) Empty() bool { + return len(m.blocks) == 0 && len(m.wantlist) == 0 && len(m.blockPresences) == 0 +} + +func (m *impl) Wantlist() []Entry { + out := make([]Entry, 0, len(m.wantlist)) + for _, e := range m.wantlist { + out = append(out, *e) + } + return out +} + +func (m *impl) Blocks() []blocks.Block { + bs := make([]blocks.Block, 0, len(m.blocks)) + for _, block := range m.blocks { + bs = append(bs, block) + } + return bs +} + +func (m *impl) BlockPresences() []BlockPresence { + bps := make([]BlockPresence, 0, len(m.blockPresences)) + for c, t := range m.blockPresences { + bps = append(bps, BlockPresence{c, t}) + } + return bps +} + +func (m *impl) Haves() []cid.Cid { + return m.getBlockPresenceByType(pb.Message_Have) +} + +func (m *impl) DontHaves() []cid.Cid { + return m.getBlockPresenceByType(pb.Message_DontHave) +} + +func (m *impl) getBlockPresenceByType(t pb.Message_BlockPresenceType) []cid.Cid { + cids := make([]cid.Cid, 0, len(m.blockPresences)) + for c, bpt := range m.blockPresences { + if bpt == t { + cids = append(cids, c) + } + } + return cids +} + +func (m *impl) PendingBytes() int32 { + return m.pendingBytes +} + +func (m *impl) SetPendingBytes(pendingBytes int32) { + m.pendingBytes = pendingBytes +} + +func (m *impl) Remove(k cid.Cid) { + delete(m.wantlist, k) +} + +func (m *impl) Cancel(k cid.Cid) int { + return m.addEntry(k, 0, true, pb.Message_Wantlist_Block, false) +} + +func (m *impl) AddEntry(k cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { + return m.addEntry(k, priority, false, wantType, sendDontHave) +} + +func (m *impl) addEntry(c cid.Cid, priority int32, cancel bool, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { + e, exists := m.wantlist[c] + if exists { + // Only change priority if want is of the same type + if e.WantType == wantType { + e.Priority = priority + } + // Only change from "dont cancel" to "do cancel" + if cancel { + e.Cancel = cancel + } + // Only change from "dont send" to "do send" DONT_HAVE + if sendDontHave { + e.SendDontHave = sendDontHave + } + // want-block overrides existing want-have + if wantType == pb.Message_Wantlist_Block && 
e.WantType == pb.Message_Wantlist_Have { + e.WantType = wantType + } + m.wantlist[c] = e + return 0 + } + + e = &Entry{ + Entry: wantlist.Entry{ + Cid: c, + Priority: priority, + WantType: wantType, + }, + SendDontHave: sendDontHave, + Cancel: cancel, + } + m.wantlist[c] = e + + return e.Size() +} + +func (m *impl) AddBlock(b blocks.Block) { + delete(m.blockPresences, b.Cid()) + m.blocks[b.Cid()] = b +} + +func (m *impl) AddBlockPresence(c cid.Cid, t pb.Message_BlockPresenceType) { + if _, ok := m.blocks[c]; ok { + return + } + m.blockPresences[c] = t +} + +func (m *impl) AddHave(c cid.Cid) { + m.AddBlockPresence(c, pb.Message_Have) +} + +func (m *impl) AddDontHave(c cid.Cid) { + m.AddBlockPresence(c, pb.Message_DontHave) +} + +func (m *impl) Size() int { + size := 0 + for _, block := range m.blocks { + size += len(block.RawData()) + } + for c := range m.blockPresences { + size += BlockPresenceSize(c) + } + for _, e := range m.wantlist { + size += e.Size() + } + + return size +} + +func BlockPresenceSize(c cid.Cid) int { + return (&pb.Message_BlockPresence{ + Cid: pb.Cid{Cid: c}, + Type: pb.Message_Have, + }).Size() +} + +// FromNet generates a new BitswapMessage from incoming data on an io.Reader. +func FromNet(r io.Reader) (BitSwapMessage, error) { + reader := msgio.NewVarintReaderSize(r, network.MessageSizeMax) + return FromMsgReader(reader) +} + +// FromPBReader generates a new Bitswap message from a gogo-protobuf reader +func FromMsgReader(r msgio.Reader) (BitSwapMessage, error) { + msg, err := r.ReadMsg() + if err != nil { + return nil, err + } + + var pb pb.Message + err = pb.Unmarshal(msg) + r.ReleaseMsg(msg) + if err != nil { + return nil, err + } + + return newMessageFromProto(pb) +} + +func (m *impl) ToProtoV0() *pb.Message { + pbm := new(pb.Message) + pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist)) + for _, e := range m.wantlist { + pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, e.ToPB()) + } + pbm.Wantlist.Full = m.full + + blocks := m.Blocks() + pbm.Blocks = make([][]byte, 0, len(blocks)) + for _, b := range blocks { + pbm.Blocks = append(pbm.Blocks, b.RawData()) + } + return pbm +} + +func (m *impl) ToProtoV1() *pb.Message { + pbm := new(pb.Message) + pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist)) + for _, e := range m.wantlist { + pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, e.ToPB()) + } + pbm.Wantlist.Full = m.full + + blocks := m.Blocks() + pbm.Payload = make([]pb.Message_Block, 0, len(blocks)) + for _, b := range blocks { + pbm.Payload = append(pbm.Payload, pb.Message_Block{ + Data: b.RawData(), + Prefix: b.Cid().Prefix().Bytes(), + }) + } + + pbm.BlockPresences = make([]pb.Message_BlockPresence, 0, len(m.blockPresences)) + for c, t := range m.blockPresences { + pbm.BlockPresences = append(pbm.BlockPresences, pb.Message_BlockPresence{ + Cid: pb.Cid{Cid: c}, + Type: t, + }) + } + + pbm.PendingBytes = m.PendingBytes() + + return pbm +} + +func (m *impl) ToNetV0(w io.Writer) error { + return write(w, m.ToProtoV0()) +} + +func (m *impl) ToNetV1(w io.Writer) error { + return write(w, m.ToProtoV1()) +} + +func write(w io.Writer, m *pb.Message) error { + size := m.Size() + + buf := pool.Get(size + binary.MaxVarintLen64) + defer pool.Put(buf) + + n := binary.PutUvarint(buf, uint64(size)) + + written, err := m.MarshalTo(buf[n:]) + if err != nil { + return err + } + n += written + + _, err = w.Write(buf[:n]) + return err +} + +func (m *impl) Loggable() map[string]interface{} { + blocks := make([]string, 0, 
len(m.blocks)) + for _, v := range m.blocks { + blocks = append(blocks, v.Cid().String()) + } + return map[string]interface{}{ + "blocks": blocks, + "wants": m.Wantlist(), + } +} diff --git a/message/message_test.go b/message/message_test.go new file mode 100644 index 0000000000000000000000000000000000000000..62610b45904cd43f5be9e2dab06ace021a874a49 --- /dev/null +++ b/message/message_test.go @@ -0,0 +1,311 @@ +package message + +import ( + "bytes" + "testing" + + pb "gitlab.dms3.io/dms3/go-bitswap/message/pb" + "gitlab.dms3.io/dms3/go-bitswap/wantlist" + blocksutil "gitlab.dms3.io/dms3/go-dms3-blocksutil" + + blocks "gitlab.dms3.io/dms3/go-block-format" + cid "gitlab.dms3.io/dms3/go-cid" + u "gitlab.dms3.io/dms3/go-dms3-util" +) + +func mkFakeCid(s string) cid.Cid { + return cid.NewCidV0(u.Hash([]byte(s))) +} + +func TestAppendWanted(t *testing.T) { + str := mkFakeCid("foo") + m := New(true) + m.AddEntry(str, 1, pb.Message_Wantlist_Block, true) + + if !wantlistContains(&m.ToProtoV0().Wantlist, str) { + t.Fail() + } +} + +func TestNewMessageFromProto(t *testing.T) { + str := mkFakeCid("a_key") + protoMessage := new(pb.Message) + protoMessage.Wantlist.Entries = []pb.Message_Wantlist_Entry{ + {Block: pb.Cid{Cid: str}}, + } + if !wantlistContains(&protoMessage.Wantlist, str) { + t.Fail() + } + m, err := newMessageFromProto(*protoMessage) + if err != nil { + t.Fatal(err) + } + + if !wantlistContains(&m.ToProtoV0().Wantlist, str) { + t.Fail() + } +} + +func TestAppendBlock(t *testing.T) { + + strs := make([]string, 2) + strs = append(strs, "Celeritas") + strs = append(strs, "Incendia") + + m := New(true) + for _, str := range strs { + block := blocks.NewBlock([]byte(str)) + m.AddBlock(block) + } + + // assert strings are in proto message + for _, blockbytes := range m.ToProtoV0().GetBlocks() { + s := bytes.NewBuffer(blockbytes).String() + if !contains(strs, s) { + t.Fail() + } + } +} + +func TestWantlist(t *testing.T) { + keystrs := []cid.Cid{mkFakeCid("foo"), mkFakeCid("bar"), mkFakeCid("baz"), mkFakeCid("bat")} + m := New(true) + for _, s := range keystrs { + m.AddEntry(s, 1, pb.Message_Wantlist_Block, true) + } + exported := m.Wantlist() + + for _, k := range exported { + present := false + for _, s := range keystrs { + + if s.Equals(k.Cid) { + present = true + } + } + if !present { + t.Logf("%v isn't in original list", k.Cid) + t.Fail() + } + } +} + +func TestCopyProtoByValue(t *testing.T) { + str := mkFakeCid("foo") + m := New(true) + protoBeforeAppend := m.ToProtoV0() + m.AddEntry(str, 1, pb.Message_Wantlist_Block, true) + if wantlistContains(&protoBeforeAppend.Wantlist, str) { + t.Fail() + } +} + +func TestToNetFromNetPreservesWantList(t *testing.T) { + original := New(true) + original.AddEntry(mkFakeCid("M"), 1, pb.Message_Wantlist_Block, true) + original.AddEntry(mkFakeCid("B"), 1, pb.Message_Wantlist_Block, true) + original.AddEntry(mkFakeCid("D"), 1, pb.Message_Wantlist_Block, true) + original.AddEntry(mkFakeCid("T"), 1, pb.Message_Wantlist_Block, true) + original.AddEntry(mkFakeCid("F"), 1, pb.Message_Wantlist_Block, true) + + buf := new(bytes.Buffer) + if err := original.ToNetV1(buf); err != nil { + t.Fatal(err) + } + + copied, err := FromNet(buf) + if err != nil { + t.Fatal(err) + } + + if !copied.Full() { + t.Fatal("fullness attribute got dropped on marshal") + } + + keys := make(map[cid.Cid]bool) + for _, k := range copied.Wantlist() { + keys[k.Cid] = true + } + + for _, k := range original.Wantlist() { + if _, ok := keys[k.Cid]; !ok { + t.Fatalf("Key Missing: \"%v\"", k) + } + 
} +} + +func TestToAndFromNetMessage(t *testing.T) { + + original := New(true) + original.AddBlock(blocks.NewBlock([]byte("W"))) + original.AddBlock(blocks.NewBlock([]byte("E"))) + original.AddBlock(blocks.NewBlock([]byte("F"))) + original.AddBlock(blocks.NewBlock([]byte("M"))) + + buf := new(bytes.Buffer) + if err := original.ToNetV1(buf); err != nil { + t.Fatal(err) + } + + m2, err := FromNet(buf) + if err != nil { + t.Fatal(err) + } + + keys := make(map[cid.Cid]bool) + for _, b := range m2.Blocks() { + keys[b.Cid()] = true + } + + for _, b := range original.Blocks() { + if _, ok := keys[b.Cid()]; !ok { + t.Fail() + } + } +} + +func wantlistContains(wantlist *pb.Message_Wantlist, c cid.Cid) bool { + for _, e := range wantlist.GetEntries() { + if e.Block.Cid.Defined() && c.Equals(e.Block.Cid) { + return true + } + } + return false +} + +func contains(strs []string, x string) bool { + for _, s := range strs { + if s == x { + return true + } + } + return false +} + +func TestDuplicates(t *testing.T) { + b := blocks.NewBlock([]byte("foo")) + msg := New(true) + + msg.AddEntry(b.Cid(), 1, pb.Message_Wantlist_Block, true) + msg.AddEntry(b.Cid(), 1, pb.Message_Wantlist_Block, true) + if len(msg.Wantlist()) != 1 { + t.Fatal("Duplicate in BitSwapMessage") + } + + msg.AddBlock(b) + msg.AddBlock(b) + if len(msg.Blocks()) != 1 { + t.Fatal("Duplicate in BitSwapMessage") + } + + b2 := blocks.NewBlock([]byte("bar")) + msg.AddBlockPresence(b2.Cid(), pb.Message_Have) + msg.AddBlockPresence(b2.Cid(), pb.Message_Have) + if len(msg.Haves()) != 1 { + t.Fatal("Duplicate in BitSwapMessage") + } +} + +func TestBlockPresences(t *testing.T) { + b1 := blocks.NewBlock([]byte("foo")) + b2 := blocks.NewBlock([]byte("bar")) + msg := New(true) + + msg.AddBlockPresence(b1.Cid(), pb.Message_Have) + msg.AddBlockPresence(b2.Cid(), pb.Message_DontHave) + if len(msg.Haves()) != 1 || !msg.Haves()[0].Equals(b1.Cid()) { + t.Fatal("Expected HAVE") + } + if len(msg.DontHaves()) != 1 || !msg.DontHaves()[0].Equals(b2.Cid()) { + t.Fatal("Expected HAVE") + } + + msg.AddBlock(b1) + if len(msg.Haves()) != 0 { + t.Fatal("Expected block to overwrite HAVE") + } + + msg.AddBlock(b2) + if len(msg.DontHaves()) != 0 { + t.Fatal("Expected block to overwrite DONT_HAVE") + } + + msg.AddBlockPresence(b1.Cid(), pb.Message_Have) + if len(msg.Haves()) != 0 { + t.Fatal("Expected HAVE not to overwrite block") + } + + msg.AddBlockPresence(b2.Cid(), pb.Message_DontHave) + if len(msg.DontHaves()) != 0 { + t.Fatal("Expected DONT_HAVE not to overwrite block") + } +} + +func TestAddWantlistEntry(t *testing.T) { + b := blocks.NewBlock([]byte("foo")) + msg := New(true) + + msg.AddEntry(b.Cid(), 1, pb.Message_Wantlist_Have, false) + msg.AddEntry(b.Cid(), 2, pb.Message_Wantlist_Block, true) + entries := msg.Wantlist() + if len(entries) != 1 { + t.Fatal("Duplicate in BitSwapMessage") + } + e := entries[0] + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("want-block should override want-have") + } + if e.SendDontHave != true { + t.Fatal("true SendDontHave should override false SendDontHave") + } + if e.Priority != 1 { + t.Fatal("priority should only be overridden if wants are of same type") + } + + msg.AddEntry(b.Cid(), 2, pb.Message_Wantlist_Block, true) + e = msg.Wantlist()[0] + if e.Priority != 2 { + t.Fatal("priority should be overridden if wants are of same type") + } + + msg.AddEntry(b.Cid(), 3, pb.Message_Wantlist_Have, false) + e = msg.Wantlist()[0] + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("want-have should not override want-block") 
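		// Editorial note: this test mirrors the merge rules in addEntry:
		// want-block replaces want-have but never the reverse, Cancel and
		// SendDontHave only flip from false to true, and Priority is refreshed
		// only when the new want has the same type as the existing entry.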
+ } + if e.SendDontHave != true { + t.Fatal("false SendDontHave should not override true SendDontHave") + } + if e.Priority != 2 { + t.Fatal("priority should only be overridden if wants are of same type") + } + + msg.Cancel(b.Cid()) + e = msg.Wantlist()[0] + if !e.Cancel { + t.Fatal("cancel should override want") + } + + msg.AddEntry(b.Cid(), 10, pb.Message_Wantlist_Block, true) + if !e.Cancel { + t.Fatal("want should not override cancel") + } +} + +func TestEntrySize(t *testing.T) { + blockGenerator := blocksutil.NewBlockGenerator() + c := blockGenerator.Next().Cid() + e := Entry{ + Entry: wantlist.Entry{ + Cid: c, + Priority: 10, + WantType: pb.Message_Wantlist_Have, + }, + SendDontHave: true, + Cancel: false, + } + epb := e.ToPB() + if e.Size() != epb.Size() { + t.Fatal("entry size calculation incorrect", e.Size(), epb.Size()) + } +} diff --git a/message/pb/Makefile b/message/pb/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..df34e54b013b6b98eeb65da57bae18af599872af --- /dev/null +++ b/message/pb/Makefile @@ -0,0 +1,11 @@ +PB = $(wildcard *.proto) +GO = $(PB:.proto=.pb.go) + +all: $(GO) + +%.pb.go: %.proto + protoc --proto_path=$(GOPATH)/src:. --gogofaster_out=. $< + +clean: + rm -f *.pb.go + rm -f *.go diff --git a/message/pb/cid.go b/message/pb/cid.go new file mode 100644 index 0000000000000000000000000000000000000000..791e394ef7c822bfc074314692f5cda97feb28ee --- /dev/null +++ b/message/pb/cid.go @@ -0,0 +1,44 @@ +package bitswap_message_pb + +import ( + "gitlab.dms3.io/dms3/go-cid" +) + +// NOTE: Don't "embed" the cid, wrap it like we're doing here. Otherwise, gogo +// will try to use the Bytes() function. + +// Cid is a custom type for CIDs in protobufs, that allows us to avoid +// reallocating. +type Cid struct { + Cid cid.Cid +} + +func (c Cid) Marshal() ([]byte, error) { + return c.Cid.Bytes(), nil +} + +func (c *Cid) MarshalTo(data []byte) (int, error) { + // intentionally using KeyString here to avoid allocating. 
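	// Editorial note: the Cid type in go-cid stores its binary form as a string,
	// so KeyString returns it without copying, and the copy() below writes those
	// raw bytes straight into data; going through Bytes() would allocate an
	// intermediate []byte first.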
+	return copy(data[:c.Size()], c.Cid.KeyString()), nil
+}
+
+func (c *Cid) Unmarshal(data []byte) (err error) {
+	c.Cid, err = cid.Cast(data)
+	return err
+}
+
+func (c *Cid) Size() int {
+	return len(c.Cid.KeyString())
+}
+
+func (c Cid) MarshalJSON() ([]byte, error) {
+	return c.Cid.MarshalJSON()
+}
+
+func (c *Cid) UnmarshalJSON(data []byte) error {
+	return c.Cid.UnmarshalJSON(data)
+}
+
+func (c Cid) Equal(other Cid) bool {
+	return c.Cid.Equals(other.Cid)
+}
diff --git a/message/pb/cid_test.go b/message/pb/cid_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..0c4b7e778c0e7192a6bec5f6022a2b844c3851c3
--- /dev/null
+++ b/message/pb/cid_test.go
@@ -0,0 +1,32 @@
+package bitswap_message_pb_test
+
+import (
+	"bytes"
+	"testing"
+
+	"gitlab.dms3.io/dms3/go-cid"
+	u "gitlab.dms3.io/dms3/go-dms3-util"
+
+	pb "gitlab.dms3.io/dms3/go-bitswap/message/pb"
+)
+
+func TestCID(t *testing.T) {
+	var expected = [...]byte{
+		10, 34, 18, 32, 195, 171,
+		143, 241, 55, 32, 232, 173,
+		144, 71, 221, 57, 70, 107,
+		60, 137, 116, 229, 146, 194,
+		250, 56, 61, 74, 57, 96,
+		113, 76, 174, 240, 196, 242,
+	}
+
+	c := cid.NewCidV0(u.Hash([]byte("foobar")))
+	msg := pb.Message_BlockPresence{Cid: pb.Cid{Cid: c}}
+	actual, err := msg.Marshal()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(actual, expected[:]) {
+		t.Fatal("failed to correctly encode custom CID type")
+	}
+}
diff --git a/message/pb/message.pb.go b/message/pb/message.pb.go
new file mode 100644
index 0000000000000000000000000000000000000000..c1effb8ea238f8b203204a1a75c06e4faa1c1f4b
--- /dev/null
+++ b/message/pb/message.pb.go
@@ -0,0 +1,1584 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: message.proto
+
+package bitswap_message_pb
+
+import (
+	fmt "fmt"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
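// Editorial note (illustrative only): the expected bytes in TestCID above decode
// as 0x0a (field 1, wire type 2) and 0x22 = 34 (length), followed by the raw
// CIDv0: the multihash header 0x12 0x20 (sha2-256, 32-byte digest) and then the
// sha256 digest of "foobar". The Type field is omitted because Message_Have is
// the zero value.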
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Message_BlockPresenceType int32 + +const ( + Message_Have Message_BlockPresenceType = 0 + Message_DontHave Message_BlockPresenceType = 1 +) + +var Message_BlockPresenceType_name = map[int32]string{ + 0: "Have", + 1: "DontHave", +} + +var Message_BlockPresenceType_value = map[string]int32{ + "Have": 0, + "DontHave": 1, +} + +func (x Message_BlockPresenceType) String() string { + return proto.EnumName(Message_BlockPresenceType_name, int32(x)) +} + +func (Message_BlockPresenceType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 0} +} + +type Message_Wantlist_WantType int32 + +const ( + Message_Wantlist_Block Message_Wantlist_WantType = 0 + Message_Wantlist_Have Message_Wantlist_WantType = 1 +) + +var Message_Wantlist_WantType_name = map[int32]string{ + 0: "Block", + 1: "Have", +} + +var Message_Wantlist_WantType_value = map[string]int32{ + "Block": 0, + "Have": 1, +} + +func (x Message_Wantlist_WantType) String() string { + return proto.EnumName(Message_Wantlist_WantType_name, int32(x)) +} + +func (Message_Wantlist_WantType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 0, 0} +} + +type Message struct { + Wantlist Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist,proto3" json:"wantlist"` + Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` + Payload []Message_Block `protobuf:"bytes,3,rep,name=payload,proto3" json:"payload"` + BlockPresences []Message_BlockPresence `protobuf:"bytes,4,rep,name=blockPresences,proto3" json:"blockPresences"` + PendingBytes int32 `protobuf:"varint,5,opt,name=pendingBytes,proto3" json:"pendingBytes,omitempty"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +func (m *Message) GetWantlist() Message_Wantlist { + if m != nil { + return m.Wantlist + } + return Message_Wantlist{} +} + +func (m *Message) GetBlocks() [][]byte { + if m != nil { + return m.Blocks + } + return nil +} + +func (m *Message) GetPayload() []Message_Block { + if m != nil { + return m.Payload + } + return nil +} + +func (m *Message) GetBlockPresences() []Message_BlockPresence { + if m != nil { + return m.BlockPresences + } + return nil +} + +func (m *Message) GetPendingBytes() int32 { + if m != nil { + return m.PendingBytes + } + return 0 +} + +type Message_Wantlist struct { + Entries []Message_Wantlist_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries"` + Full bool `protobuf:"varint,2,opt,name=full,proto3" json:"full,omitempty"` +} + +func (m *Message_Wantlist) Reset() { *m = Message_Wantlist{} } +func (m 
*Message_Wantlist) String() string { return proto.CompactTextString(m) } +func (*Message_Wantlist) ProtoMessage() {} +func (*Message_Wantlist) Descriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 0} +} +func (m *Message_Wantlist) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message_Wantlist) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message_Wantlist.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message_Wantlist) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_Wantlist.Merge(m, src) +} +func (m *Message_Wantlist) XXX_Size() int { + return m.Size() +} +func (m *Message_Wantlist) XXX_DiscardUnknown() { + xxx_messageInfo_Message_Wantlist.DiscardUnknown(m) +} + +var xxx_messageInfo_Message_Wantlist proto.InternalMessageInfo + +func (m *Message_Wantlist) GetEntries() []Message_Wantlist_Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *Message_Wantlist) GetFull() bool { + if m != nil { + return m.Full + } + return false +} + +type Message_Wantlist_Entry struct { + Block Cid `protobuf:"bytes,1,opt,name=block,proto3,customtype=Cid" json:"block"` + Priority int32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` + Cancel bool `protobuf:"varint,3,opt,name=cancel,proto3" json:"cancel,omitempty"` + WantType Message_Wantlist_WantType `protobuf:"varint,4,opt,name=wantType,proto3,enum=bitswap.message.pb.Message_Wantlist_WantType" json:"wantType,omitempty"` + SendDontHave bool `protobuf:"varint,5,opt,name=sendDontHave,proto3" json:"sendDontHave,omitempty"` +} + +func (m *Message_Wantlist_Entry) Reset() { *m = Message_Wantlist_Entry{} } +func (m *Message_Wantlist_Entry) String() string { return proto.CompactTextString(m) } +func (*Message_Wantlist_Entry) ProtoMessage() {} +func (*Message_Wantlist_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 0, 0} +} +func (m *Message_Wantlist_Entry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message_Wantlist_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message_Wantlist_Entry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message_Wantlist_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_Wantlist_Entry.Merge(m, src) +} +func (m *Message_Wantlist_Entry) XXX_Size() int { + return m.Size() +} +func (m *Message_Wantlist_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_Message_Wantlist_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_Message_Wantlist_Entry proto.InternalMessageInfo + +func (m *Message_Wantlist_Entry) GetPriority() int32 { + if m != nil { + return m.Priority + } + return 0 +} + +func (m *Message_Wantlist_Entry) GetCancel() bool { + if m != nil { + return m.Cancel + } + return false +} + +func (m *Message_Wantlist_Entry) GetWantType() Message_Wantlist_WantType { + if m != nil { + return m.WantType + } + return Message_Wantlist_Block +} + +func (m *Message_Wantlist_Entry) GetSendDontHave() bool { + if m != nil { + return m.SendDontHave + } + return false +} + +type Message_Block struct { + Prefix []byte `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` + Data []byte 
`protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *Message_Block) Reset() { *m = Message_Block{} } +func (m *Message_Block) String() string { return proto.CompactTextString(m) } +func (*Message_Block) ProtoMessage() {} +func (*Message_Block) Descriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 1} +} +func (m *Message_Block) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message_Block.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message_Block) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_Block.Merge(m, src) +} +func (m *Message_Block) XXX_Size() int { + return m.Size() +} +func (m *Message_Block) XXX_DiscardUnknown() { + xxx_messageInfo_Message_Block.DiscardUnknown(m) +} + +var xxx_messageInfo_Message_Block proto.InternalMessageInfo + +func (m *Message_Block) GetPrefix() []byte { + if m != nil { + return m.Prefix + } + return nil +} + +func (m *Message_Block) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type Message_BlockPresence struct { + Cid Cid `protobuf:"bytes,1,opt,name=cid,proto3,customtype=Cid" json:"cid"` + Type Message_BlockPresenceType `protobuf:"varint,2,opt,name=type,proto3,enum=bitswap.message.pb.Message_BlockPresenceType" json:"type,omitempty"` +} + +func (m *Message_BlockPresence) Reset() { *m = Message_BlockPresence{} } +func (m *Message_BlockPresence) String() string { return proto.CompactTextString(m) } +func (*Message_BlockPresence) ProtoMessage() {} +func (*Message_BlockPresence) Descriptor() ([]byte, []int) { + return fileDescriptor_33c57e4bae7b9afd, []int{0, 2} +} +func (m *Message_BlockPresence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message_BlockPresence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message_BlockPresence.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message_BlockPresence) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message_BlockPresence.Merge(m, src) +} +func (m *Message_BlockPresence) XXX_Size() int { + return m.Size() +} +func (m *Message_BlockPresence) XXX_DiscardUnknown() { + xxx_messageInfo_Message_BlockPresence.DiscardUnknown(m) +} + +var xxx_messageInfo_Message_BlockPresence proto.InternalMessageInfo + +func (m *Message_BlockPresence) GetType() Message_BlockPresenceType { + if m != nil { + return m.Type + } + return Message_Have +} + +func init() { + proto.RegisterEnum("bitswap.message.pb.Message_BlockPresenceType", Message_BlockPresenceType_name, Message_BlockPresenceType_value) + proto.RegisterEnum("bitswap.message.pb.Message_Wantlist_WantType", Message_Wantlist_WantType_name, Message_Wantlist_WantType_value) + proto.RegisterType((*Message)(nil), "bitswap.message.pb.Message") + proto.RegisterType((*Message_Wantlist)(nil), "bitswap.message.pb.Message.Wantlist") + proto.RegisterType((*Message_Wantlist_Entry)(nil), "bitswap.message.pb.Message.Wantlist.Entry") + proto.RegisterType((*Message_Block)(nil), "bitswap.message.pb.Message.Block") + proto.RegisterType((*Message_BlockPresence)(nil), "bitswap.message.pb.Message.BlockPresence") +} + +func init() { 
proto.RegisterFile("message.proto", fileDescriptor_33c57e4bae7b9afd) } + +var fileDescriptor_33c57e4bae7b9afd = []byte{ + // 497 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xdf, 0x8a, 0xd3, 0x40, + 0x14, 0xc6, 0x33, 0x4d, 0xd2, 0xc6, 0xd3, 0xee, 0x52, 0xe7, 0x42, 0x42, 0xc0, 0x34, 0x5b, 0xbc, + 0x88, 0xca, 0x66, 0xa1, 0xfb, 0x04, 0x5b, 0xff, 0xa0, 0x82, 0x20, 0x83, 0xd0, 0xeb, 0xfc, 0x99, + 0xd6, 0xc1, 0x6c, 0x12, 0x33, 0x53, 0xd7, 0xbe, 0x85, 0x8f, 0xb5, 0x37, 0xc2, 0x5e, 0x8a, 0xca, + 0x22, 0xed, 0x8b, 0x48, 0x4e, 0xa6, 0x85, 0xba, 0xe2, 0xee, 0xdd, 0x9c, 0x33, 0xe7, 0xfb, 0x65, + 0xbe, 0xef, 0x10, 0x38, 0x38, 0xe7, 0x52, 0xc6, 0x0b, 0x1e, 0x55, 0x75, 0xa9, 0x4a, 0x4a, 0x13, + 0xa1, 0xe4, 0x45, 0x5c, 0x45, 0xbb, 0x76, 0xe2, 0x1d, 0x2f, 0x84, 0xfa, 0xb0, 0x4c, 0xa2, 0xb4, + 0x3c, 0x3f, 0x59, 0x94, 0x8b, 0xf2, 0x04, 0x47, 0x93, 0xe5, 0x1c, 0x2b, 0x2c, 0xf0, 0xd4, 0x22, + 0xc6, 0xbf, 0xba, 0xd0, 0x7b, 0xdb, 0xaa, 0xe9, 0x4b, 0x70, 0x2e, 0xe2, 0x42, 0xe5, 0x42, 0x2a, + 0x97, 0x04, 0x24, 0xec, 0x4f, 0x1e, 0x45, 0x37, 0xbf, 0x10, 0xe9, 0xf1, 0x68, 0xa6, 0x67, 0xa7, + 0xd6, 0xe5, 0xf5, 0xc8, 0x60, 0x3b, 0x2d, 0x7d, 0x00, 0xdd, 0x24, 0x2f, 0xd3, 0x8f, 0xd2, 0xed, + 0x04, 0x66, 0x38, 0x60, 0xba, 0xa2, 0x67, 0xd0, 0xab, 0xe2, 0x55, 0x5e, 0xc6, 0x99, 0x6b, 0x06, + 0x66, 0xd8, 0x9f, 0x1c, 0xfd, 0x0f, 0x3f, 0x6d, 0x44, 0x9a, 0xbd, 0xd5, 0xd1, 0x19, 0x1c, 0x22, + 0xec, 0x5d, 0xcd, 0x25, 0x2f, 0x52, 0x2e, 0x5d, 0x0b, 0x49, 0x8f, 0x6f, 0x25, 0x6d, 0x15, 0x9a, + 0xf8, 0x17, 0x86, 0x8e, 0x61, 0x50, 0xf1, 0x22, 0x13, 0xc5, 0x62, 0xba, 0x52, 0x5c, 0xba, 0x76, + 0x40, 0x42, 0x9b, 0xed, 0xf5, 0xbc, 0x9f, 0x1d, 0x70, 0xb6, 0xa6, 0xe9, 0x1b, 0xe8, 0xf1, 0x42, + 0xd5, 0x82, 0x4b, 0x97, 0xe0, 0x13, 0x9e, 0xdc, 0x25, 0xab, 0xe8, 0x45, 0xa1, 0xea, 0xd5, 0xd6, + 0x95, 0x06, 0x50, 0x0a, 0xd6, 0x7c, 0x99, 0xe7, 0x6e, 0x27, 0x20, 0xa1, 0xc3, 0xf0, 0xec, 0x7d, + 0x23, 0x60, 0xe3, 0x30, 0x3d, 0x02, 0x1b, 0x1f, 0x8b, 0x3b, 0x19, 0x4c, 0xfb, 0x8d, 0xf6, 0xc7, + 0xf5, 0xc8, 0x7c, 0x26, 0x32, 0xd6, 0xde, 0x50, 0x0f, 0x9c, 0xaa, 0x16, 0x65, 0x2d, 0xd4, 0x0a, + 0x21, 0x36, 0xdb, 0xd5, 0xcd, 0x36, 0xd2, 0xb8, 0x48, 0x79, 0xee, 0x9a, 0x88, 0xd7, 0x15, 0x7d, + 0xdd, 0x6e, 0xfb, 0xfd, 0xaa, 0xe2, 0xae, 0x15, 0x90, 0xf0, 0x70, 0x72, 0x7c, 0x27, 0x07, 0x33, + 0x2d, 0x62, 0x3b, 0x79, 0x13, 0x9e, 0xe4, 0x45, 0xf6, 0xbc, 0x2c, 0xd4, 0xab, 0xf8, 0x33, 0xc7, + 0xf0, 0x1c, 0xb6, 0xd7, 0x1b, 0x8f, 0xda, 0xec, 0x70, 0xfe, 0x1e, 0xd8, 0xb8, 0x93, 0xa1, 0x41, + 0x1d, 0xb0, 0x9a, 0xeb, 0x21, 0xf1, 0x4e, 0x75, 0xb3, 0x79, 0x70, 0x55, 0xf3, 0xb9, 0xf8, 0xd2, + 0x1a, 0x66, 0xba, 0x6a, 0x52, 0xca, 0x62, 0x15, 0xa3, 0xc1, 0x01, 0xc3, 0xb3, 0xf7, 0x09, 0x0e, + 0xf6, 0xb6, 0x4b, 0x1f, 0x82, 0x99, 0x8a, 0xec, 0x5f, 0x51, 0x35, 0x7d, 0x7a, 0x06, 0x96, 0x6a, + 0x0c, 0x77, 0x6e, 0x37, 0xbc, 0xc7, 0x45, 0xc3, 0x28, 0x1d, 0x3f, 0x85, 0xfb, 0x37, 0xae, 0x76, + 0x36, 0x0c, 0x3a, 0x00, 0x67, 0xeb, 0x79, 0x48, 0xa6, 0xee, 0xe5, 0xda, 0x27, 0x57, 0x6b, 0x9f, + 0xfc, 0x5e, 0xfb, 0xe4, 0xeb, 0xc6, 0x37, 0xae, 0x36, 0xbe, 0xf1, 0x7d, 0xe3, 0x1b, 0x49, 0x17, + 0xff, 0xbf, 0xd3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x8a, 0xaf, 0x83, 0xd3, 0x03, 0x00, + 0x00, +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PendingBytes != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.PendingBytes)) + i-- + dAtA[i] = 0x28 + } + if len(m.BlockPresences) > 0 { + for iNdEx := len(m.BlockPresences) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.BlockPresences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Payload) > 0 { + for iNdEx := len(m.Payload) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Payload[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Blocks) > 0 { + for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Blocks[iNdEx]) + copy(dAtA[i:], m.Blocks[iNdEx]) + i = encodeVarintMessage(dAtA, i, uint64(len(m.Blocks[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Wantlist.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Message_Wantlist) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message_Wantlist) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Wantlist) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Full { + i-- + if m.Full { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Entries) > 0 { + for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Message_Wantlist_Entry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message_Wantlist_Entry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Wantlist_Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SendDontHave { + i-- + if m.SendDontHave { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.WantType != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.WantType)) + i-- + dAtA[i] = 0x20 + } + if m.Cancel { + i-- + if m.Cancel { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Priority != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x10 + } + { + size := m.Block.Size() + i -= size + if _, err := m.Block.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Message_Block) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != 
nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message_Block) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Block) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintMessage(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + if len(m.Prefix) > 0 { + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintMessage(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Message_BlockPresence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message_BlockPresence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_BlockPresence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Type != 0 { + i = encodeVarintMessage(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x10 + } + { + size := m.Cid.Size() + i -= size + if _, err := m.Cid.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMessage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintMessage(dAtA []byte, offset int, v uint64) int { + offset -= sovMessage(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Wantlist.Size() + n += 1 + l + sovMessage(uint64(l)) + if len(m.Blocks) > 0 { + for _, b := range m.Blocks { + l = len(b) + n += 1 + l + sovMessage(uint64(l)) + } + } + if len(m.Payload) > 0 { + for _, e := range m.Payload { + l = e.Size() + n += 1 + l + sovMessage(uint64(l)) + } + } + if len(m.BlockPresences) > 0 { + for _, e := range m.BlockPresences { + l = e.Size() + n += 1 + l + sovMessage(uint64(l)) + } + } + if m.PendingBytes != 0 { + n += 1 + sovMessage(uint64(m.PendingBytes)) + } + return n +} + +func (m *Message_Wantlist) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovMessage(uint64(l)) + } + } + if m.Full { + n += 2 + } + return n +} + +func (m *Message_Wantlist_Entry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Block.Size() + n += 1 + l + sovMessage(uint64(l)) + if m.Priority != 0 { + n += 1 + sovMessage(uint64(m.Priority)) + } + if m.Cancel { + n += 2 + } + if m.WantType != 0 { + n += 1 + sovMessage(uint64(m.WantType)) + } + if m.SendDontHave { + n += 2 + } + return n +} + +func (m *Message_Block) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Prefix) + if l > 0 { + n += 1 + l + sovMessage(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovMessage(uint64(l)) + } + return n +} + +func (m *Message_BlockPresence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Cid.Size() + n += 1 + l + sovMessage(uint64(l)) + if m.Type != 0 { + n += 1 + sovMessage(uint64(m.Type)) + } + return n +} + +func sovMessage(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMessage(x uint64) (n int) { + return 
sovMessage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Wantlist", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Wantlist.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blocks = append(m.Blocks, make([]byte, postIndex-iNdEx)) + copy(m.Blocks[len(m.Blocks)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload, Message_Block{}) + if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockPresences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlockPresences = append(m.BlockPresences, Message_BlockPresence{}) + if err := 
m.BlockPresences[len(m.BlockPresences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PendingBytes", wireType) + } + m.PendingBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PendingBytes |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message_Wantlist) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Wantlist: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Wantlist: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, Message_Wantlist_Entry{}) + if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Full", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Full = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: Entry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + m.Priority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Priority |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cancel", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Cancel = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WantType", wireType) + } + m.WantType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WantType |= Message_Wantlist_WantType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SendDontHave", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SendDontHave = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message_Block) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Block: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Block: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Prefix", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = append(m.Prefix[:0], dAtA[iNdEx:postIndex]...) + if m.Prefix == nil { + m.Prefix = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message_BlockPresence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockPresence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockPresence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessage + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Cid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Message_BlockPresenceType(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMessage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
return ErrInvalidLengthMessage + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMessage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMessage(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMessage + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMessage + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMessage + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMessage = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMessage = fmt.Errorf("proto: unexpected end of group") +) diff --git a/message/pb/message.proto b/message/pb/message.proto new file mode 100644 index 0000000000000000000000000000000000000000..e6c271cc2fca32586da8d9bfd1986b1e785bf1f1 --- /dev/null +++ b/message/pb/message.proto @@ -0,0 +1,46 @@ +syntax = "proto3"; + +package bitswap.message.pb; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +message Message { + + message Wantlist { + enum WantType { + Block = 0; + Have = 1; + } + + message Entry { + bytes block = 1 [(gogoproto.customtype) = "Cid", (gogoproto.nullable) = false]; // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0) + int32 priority = 2; // the priority (normalized). default to 1 + bool cancel = 3; // whether this revokes an entry + WantType wantType = 4; // Note: defaults to enum 0, ie Block + bool sendDontHave = 5; // Note: defaults to false + } + + repeated Entry entries = 1 [(gogoproto.nullable) = false]; // a list of wantlist entries + bool full = 2; // whether this is the full wantlist. 
default to false + } + + message Block { + bytes prefix = 1; // CID prefix (cid version, multicodec and multihash prefix (type + length) + bytes data = 2; + } + + enum BlockPresenceType { + Have = 0; + DontHave = 1; + } + message BlockPresence { + bytes cid = 1 [(gogoproto.customtype) = "Cid", (gogoproto.nullable) = false]; + BlockPresenceType type = 2; + } + + Wantlist wantlist = 1 [(gogoproto.nullable) = false]; + repeated bytes blocks = 2; // used to send Blocks in bitswap 1.0.0 + repeated Block payload = 3 [(gogoproto.nullable) = false]; // used to send Blocks in bitswap 1.1.0 + repeated BlockPresence blockPresences = 4 [(gogoproto.nullable) = false]; + int32 pendingBytes = 5; +} diff --git a/network/connecteventmanager.go b/network/connecteventmanager.go new file mode 100644 index 0000000000000000000000000000000000000000..70d1c63b58589dacc16a931653c9ceacf5b1dc02 --- /dev/null +++ b/network/connecteventmanager.go @@ -0,0 +1,105 @@ +package network + +import ( + "sync" + + "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +type ConnectionListener interface { + PeerConnected(peer.ID) + PeerDisconnected(peer.ID) +} + +type connectEventManager struct { + connListener ConnectionListener + lk sync.RWMutex + conns map[peer.ID]*connState +} + +type connState struct { + refs int + responsive bool +} + +func newConnectEventManager(connListener ConnectionListener) *connectEventManager { + return &connectEventManager{ + connListener: connListener, + conns: make(map[peer.ID]*connState), + } +} + +func (c *connectEventManager) Connected(p peer.ID) { + c.lk.Lock() + defer c.lk.Unlock() + + state, ok := c.conns[p] + if !ok { + state = &connState{responsive: true} + c.conns[p] = state + } + state.refs++ + + if state.refs == 1 && state.responsive { + c.connListener.PeerConnected(p) + } +} + +func (c *connectEventManager) Disconnected(p peer.ID) { + c.lk.Lock() + defer c.lk.Unlock() + + state, ok := c.conns[p] + if !ok { + // Should never happen + return + } + state.refs-- + + if state.refs == 0 { + if state.responsive { + c.connListener.PeerDisconnected(p) + } + delete(c.conns, p) + } +} + +func (c *connectEventManager) MarkUnresponsive(p peer.ID) { + c.lk.Lock() + defer c.lk.Unlock() + + state, ok := c.conns[p] + if !ok || !state.responsive { + return + } + state.responsive = false + + c.connListener.PeerDisconnected(p) +} + +func (c *connectEventManager) OnMessage(p peer.ID) { + // This is a frequent operation so to avoid different message arrivals + // getting blocked by a write lock, first take a read lock to check if + // we need to modify state + c.lk.RLock() + state, ok := c.conns[p] + c.lk.RUnlock() + + if !ok || state.responsive { + return + } + + // We need to make a modification so now take a write lock + c.lk.Lock() + defer c.lk.Unlock() + + // Note: state may have changed in the time between when read lock + // was released and write lock taken, so check again + state, ok = c.conns[p] + if !ok || state.responsive { + return + } + + state.responsive = true + c.connListener.PeerConnected(p) +} diff --git a/network/connecteventmanager_test.go b/network/connecteventmanager_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e2003112fcdbacec962646dbb0adaaedb6115fa3 --- /dev/null +++ b/network/connecteventmanager_test.go @@ -0,0 +1,144 @@ +package network + +import ( + "testing" + + "gitlab.dms3.io/dms3/go-bitswap/internal/testutil" + "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +type mockConnListener struct { + conns map[peer.ID]int +} + +func newMockConnListener() 
*mockConnListener { + return &mockConnListener{ + conns: make(map[peer.ID]int), + } +} + +func (cl *mockConnListener) PeerConnected(p peer.ID) { + cl.conns[p]++ +} + +func (cl *mockConnListener) PeerDisconnected(p peer.ID) { + cl.conns[p]-- +} + +func TestConnectEventManagerConnectionCount(t *testing.T) { + connListener := newMockConnListener() + peers := testutil.GeneratePeers(2) + cem := newConnectEventManager(connListener) + + // Peer A: 1 Connection + cem.Connected(peers[0]) + if connListener.conns[peers[0]] != 1 { + t.Fatal("Expected Connected event") + } + + // Peer A: 2 Connections + cem.Connected(peers[0]) + if connListener.conns[peers[0]] != 1 { + t.Fatal("Unexpected no Connected event for the same peer") + } + + // Peer A: 2 Connections + // Peer B: 1 Connection + cem.Connected(peers[1]) + if connListener.conns[peers[1]] != 1 { + t.Fatal("Expected Connected event") + } + + // Peer A: 2 Connections + // Peer B: 0 Connections + cem.Disconnected(peers[1]) + if connListener.conns[peers[1]] != 0 { + t.Fatal("Expected Disconnected event") + } + + // Peer A: 1 Connection + // Peer B: 0 Connections + cem.Disconnected(peers[0]) + if connListener.conns[peers[0]] != 1 { + t.Fatal("Expected no Disconnected event for peer with one remaining conn") + } + + // Peer A: 0 Connections + // Peer B: 0 Connections + cem.Disconnected(peers[0]) + if connListener.conns[peers[0]] != 0 { + t.Fatal("Expected Disconnected event") + } +} + +func TestConnectEventManagerMarkUnresponsive(t *testing.T) { + connListener := newMockConnListener() + p := testutil.GeneratePeers(1)[0] + cem := newConnectEventManager(connListener) + + // Peer A: 1 Connection + cem.Connected(p) + if connListener.conns[p] != 1 { + t.Fatal("Expected Connected event") + } + + // Peer A: 1 Connection + cem.MarkUnresponsive(p) + if connListener.conns[p] != 0 { + t.Fatal("Expected Disconnected event") + } + + // Peer A: 2 Connections + cem.Connected(p) + if connListener.conns[p] != 0 { + t.Fatal("Expected no Connected event for unresponsive peer") + } + + // Peer A: 2 Connections + cem.OnMessage(p) + if connListener.conns[p] != 1 { + t.Fatal("Expected Connected event for newly responsive peer") + } + + // Peer A: 2 Connections + cem.OnMessage(p) + if connListener.conns[p] != 1 { + t.Fatal("Expected no further Connected event for subsequent messages") + } + + // Peer A: 1 Connection + cem.Disconnected(p) + if connListener.conns[p] != 1 { + t.Fatal("Expected no Disconnected event for peer with one remaining conn") + } + + // Peer A: 0 Connections + cem.Disconnected(p) + if connListener.conns[p] != 0 { + t.Fatal("Expected Disconnected event") + } +} + +func TestConnectEventManagerDisconnectAfterMarkUnresponsive(t *testing.T) { + connListener := newMockConnListener() + p := testutil.GeneratePeers(1)[0] + cem := newConnectEventManager(connListener) + + // Peer A: 1 Connection + cem.Connected(p) + if connListener.conns[p] != 1 { + t.Fatal("Expected Connected event") + } + + // Peer A: 1 Connection + cem.MarkUnresponsive(p) + if connListener.conns[p] != 0 { + t.Fatal("Expected Disconnected event") + } + + // Peer A: 0 Connections + cem.Disconnected(p) + if connListener.conns[p] != 0 { + t.Fatal("Expected not to receive a second Disconnected event") + } +} diff --git a/network/dms3_impl.go b/network/dms3_impl.go new file mode 100644 index 0000000000000000000000000000000000000000..417b877257c709d9700ecac283946e1f2f141bb6 --- /dev/null +++ b/network/dms3_impl.go @@ -0,0 +1,447 @@ +package network + +import ( + "context" + "errors" + "fmt" + "io" + 
"sync/atomic" + "time" + + bsmsg "gitlab.dms3.io/dms3/go-bitswap/message" + + "github.com/multiformats/go-multistream" + cid "gitlab.dms3.io/dms3/go-cid" + logging "gitlab.dms3.io/dms3/go-log" + ma "gitlab.dms3.io/mf/go-multiaddr" + msgio "gitlab.dms3.io/p2p/go-msgio" + "gitlab.dms3.io/p2p/go-p2p-core/connmgr" + "gitlab.dms3.io/p2p/go-p2p-core/host" + "gitlab.dms3.io/p2p/go-p2p-core/network" + "gitlab.dms3.io/p2p/go-p2p-core/peer" + peerstore "gitlab.dms3.io/p2p/go-p2p-core/peerstore" + "gitlab.dms3.io/p2p/go-p2p-core/protocol" + "gitlab.dms3.io/p2p/go-p2p-core/routing" + "gitlab.dms3.io/p2p/go-p2p/p2p/protocol/ping" +) + +var log = logging.Logger("bitswap_network") + +var connectTimeout = time.Second * 5 +var sendMessageTimeout = time.Minute * 10 + +// NewFromDms3Host returns a BitSwapNetwork supported by underlying DMS3 host. +func NewFromDms3Host(host host.Host, r routing.ContentRouting, opts ...NetOpt) BitSwapNetwork { + s := processSettings(opts...) + + bitswapNetwork := impl{ + host: host, + routing: r, + + protocolBitswapNoVers: s.ProtocolPrefix + ProtocolBitswapNoVers, + protocolBitswapOneZero: s.ProtocolPrefix + ProtocolBitswapOneZero, + protocolBitswapOneOne: s.ProtocolPrefix + ProtocolBitswapOneOne, + protocolBitswap: s.ProtocolPrefix + ProtocolBitswap, + + supportedProtocols: s.SupportedProtocols, + } + + return &bitswapNetwork +} + +func processSettings(opts ...NetOpt) Settings { + s := Settings{ + SupportedProtocols: []protocol.ID{ + ProtocolBitswap, + ProtocolBitswapOneOne, + ProtocolBitswapOneZero, + ProtocolBitswapNoVers, + }, + } + for _, opt := range opts { + opt(&s) + } + for i, proto := range s.SupportedProtocols { + s.SupportedProtocols[i] = s.ProtocolPrefix + proto + } + return s +} + +// impl transforms the dms3 network interface, which sends and receives +// NetMessage objects, into the bitswap network interface. +type impl struct { + // NOTE: Stats must be at the top of the heap allocation to ensure 64bit + // alignment. 
+ stats Stats + + host host.Host + routing routing.ContentRouting + connectEvtMgr *connectEventManager + + protocolBitswapNoVers protocol.ID + protocolBitswapOneZero protocol.ID + protocolBitswapOneOne protocol.ID + protocolBitswap protocol.ID + + supportedProtocols []protocol.ID + + // inbound messages from the network are forwarded to the receiver + receiver Receiver +} + +type streamMessageSender struct { + to peer.ID + stream network.Stream + connected bool + bsnet *impl + opts *MessageSenderOpts +} + +// Open a stream to the remote peer +func (s *streamMessageSender) Connect(ctx context.Context) (network.Stream, error) { + if s.connected { + return s.stream, nil + } + + tctx, cancel := context.WithTimeout(ctx, s.opts.SendTimeout) + defer cancel() + + if err := s.bsnet.ConnectTo(tctx, s.to); err != nil { + return nil, err + } + + stream, err := s.bsnet.newStreamToPeer(tctx, s.to) + if err != nil { + return nil, err + } + + s.stream = stream + s.connected = true + return s.stream, nil +} + +// Reset the stream +func (s *streamMessageSender) Reset() error { + if s.stream != nil { + err := s.stream.Reset() + s.connected = false + return err + } + return nil +} + +// Close the stream +func (s *streamMessageSender) Close() error { + return s.stream.Close() +} + +// Indicates whether the peer supports HAVE / DONT_HAVE messages +func (s *streamMessageSender) SupportsHave() bool { + return s.bsnet.SupportsHave(s.stream.Protocol()) +} + +// Send a message to the peer, attempting multiple times +func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { + return s.multiAttempt(ctx, func() error { + return s.send(ctx, msg) + }) +} + +// Perform a function with multiple attempts, and a timeout +func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func() error) error { + // Try to call the function repeatedly + var err error + for i := 0; i < s.opts.MaxRetries; i++ { + if err = fn(); err == nil { + // Attempt was successful + return nil + } + + // Attempt failed + + // If the sender has been closed or the context cancelled, just bail out + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + // Protocol is not supported, so no need to try multiple times + if errors.Is(err, multistream.ErrNotSupported) { + s.bsnet.connectEvtMgr.MarkUnresponsive(s.to) + return err + } + + // Failed to send so reset stream and try again + _ = s.Reset() + + // Failed too many times so mark the peer as unresponsive and return an error + if i == s.opts.MaxRetries-1 { + s.bsnet.connectEvtMgr.MarkUnresponsive(s.to) + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(s.opts.SendErrorBackoff): + // wait a short time in case disconnect notifications are still propagating + log.Infof("send message to %s failed but context was not Done: %s", s.to, err) + } + } + return err +} + +// Send a message to the peer +func (s *streamMessageSender) send(ctx context.Context, msg bsmsg.BitSwapMessage) error { + start := time.Now() + stream, err := s.Connect(ctx) + if err != nil { + log.Infof("failed to open stream to %s: %s", s.to, err) + return err + } + + // The send timeout includes the time required to connect + // (although usually we will already have connected - we only need to + // connect after a failed attempt to send) + timeout := s.opts.SendTimeout - time.Since(start) + if err = s.bsnet.msgToStream(ctx, stream, msg, timeout); err != nil { + log.Infof("failed to send message to %s: %s", s.to, err) + return err + } + + return nil 
+}
+
+func (bsnet *impl) Self() peer.ID {
+	return bsnet.host.ID()
+}
+
+func (bsnet *impl) Ping(ctx context.Context, p peer.ID) ping.Result {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+	res := <-ping.Ping(ctx, bsnet.host, p)
+	return res
+}
+
+func (bsnet *impl) Latency(p peer.ID) time.Duration {
+	return bsnet.host.Peerstore().LatencyEWMA(p)
+}
+
+// Indicates whether the given protocol supports HAVE / DONT_HAVE messages
+func (bsnet *impl) SupportsHave(proto protocol.ID) bool {
+	switch proto {
+	case bsnet.protocolBitswapOneOne, bsnet.protocolBitswapOneZero, bsnet.protocolBitswapNoVers:
+		return false
+	}
+	return true
+}
+
+func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg.BitSwapMessage, timeout time.Duration) error {
+	deadline := time.Now().Add(timeout)
+	if dl, ok := ctx.Deadline(); ok && dl.Before(deadline) {
+		deadline = dl
+	}
+
+	if err := s.SetWriteDeadline(deadline); err != nil {
+		log.Warnf("error setting deadline: %s", err)
+	}
+
+	// Older Bitswap versions use a slightly different wire format so we need
+	// to convert the message to the appropriate format depending on the remote
+	// peer's Bitswap version.
+	switch s.Protocol() {
+	case bsnet.protocolBitswapOneOne, bsnet.protocolBitswap:
+		if err := msg.ToNetV1(s); err != nil {
+			log.Debugf("error: %s", err)
+			return err
+		}
+	case bsnet.protocolBitswapOneZero, bsnet.protocolBitswapNoVers:
+		if err := msg.ToNetV0(s); err != nil {
+			log.Debugf("error: %s", err)
+			return err
+		}
+	default:
+		return fmt.Errorf("unrecognized protocol on remote: %s", s.Protocol())
+	}
+
+	atomic.AddUint64(&bsnet.stats.MessagesSent, 1)
+
+	if err := s.SetWriteDeadline(time.Time{}); err != nil {
+		log.Warnf("error resetting deadline: %s", err)
+	}
+	return nil
+}
+
+func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID, opts *MessageSenderOpts) (MessageSender, error) {
+	opts = setDefaultOpts(opts)
+
+	sender := &streamMessageSender{
+		to:    p,
+		bsnet: bsnet,
+		opts:  opts,
+	}
+
+	err := sender.multiAttempt(ctx, func() error {
+		_, err := sender.Connect(ctx)
+		return err
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return sender, nil
+}
+
+func setDefaultOpts(opts *MessageSenderOpts) *MessageSenderOpts {
+	copy := *opts
+	if opts.MaxRetries == 0 {
+		copy.MaxRetries = 3
+	}
+	if opts.SendTimeout == 0 {
+		copy.SendTimeout = sendMessageTimeout
+	}
+	if opts.SendErrorBackoff == 0 {
+		copy.SendErrorBackoff = 100 * time.Millisecond
+	}
+	return &copy
+}
+
+func (bsnet *impl) SendMessage(
+	ctx context.Context,
+	p peer.ID,
+	outgoing bsmsg.BitSwapMessage) error {
+
+	tctx, cancel := context.WithTimeout(ctx, connectTimeout)
+	defer cancel()
+
+	s, err := bsnet.newStreamToPeer(tctx, p)
+	if err != nil {
+		return err
+	}
+
+	if err = bsnet.msgToStream(ctx, s, outgoing, sendMessageTimeout); err != nil {
+		_ = s.Reset()
+		return err
+	}
+
+	return s.Close()
+}
+
+func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (network.Stream, error) {
+	return bsnet.host.NewStream(ctx, p, bsnet.supportedProtocols...)
+}
+
+func (bsnet *impl) SetDelegate(r Receiver) {
+	bsnet.receiver = r
+	bsnet.connectEvtMgr = newConnectEventManager(r)
+	for _, proto := range bsnet.supportedProtocols {
+		bsnet.host.SetStreamHandler(proto, bsnet.handleNewStream)
+	}
+	bsnet.host.Network().Notify((*netNotifiee)(bsnet))
+	// TODO: StopNotify.
+ +} + +func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { + return bsnet.host.Connect(ctx, peer.AddrInfo{ID: p}) +} + +func (bsnet *impl) DisconnectFrom(ctx context.Context, p peer.ID) error { + panic("Not implemented: DisconnectFrom() is only used by tests") +} + +// FindProvidersAsync returns a channel of providers for the given key. +func (bsnet *impl) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { + out := make(chan peer.ID, max) + go func() { + defer close(out) + providers := bsnet.routing.FindProvidersAsync(ctx, k, max) + for info := range providers { + if info.ID == bsnet.host.ID() { + continue // ignore self as provider + } + bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.TempAddrTTL) + select { + case <-ctx.Done(): + return + case out <- info.ID: + } + } + }() + return out +} + +// Provide provides the key to the network +func (bsnet *impl) Provide(ctx context.Context, k cid.Cid) error { + return bsnet.routing.Provide(ctx, k, true) +} + +// handleNewStream receives a new stream from the network. +func (bsnet *impl) handleNewStream(s network.Stream) { + defer s.Close() + + if bsnet.receiver == nil { + _ = s.Reset() + return + } + + reader := msgio.NewVarintReaderSize(s, network.MessageSizeMax) + for { + received, err := bsmsg.FromMsgReader(reader) + if err != nil { + if err != io.EOF { + _ = s.Reset() + bsnet.receiver.ReceiveError(err) + log.Debugf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) + } + return + } + + p := s.Conn().RemotePeer() + ctx := context.Background() + log.Debugf("bitswap net handleNewStream from %s", s.Conn().RemotePeer()) + bsnet.connectEvtMgr.OnMessage(s.Conn().RemotePeer()) + bsnet.receiver.ReceiveMessage(ctx, p, received) + atomic.AddUint64(&bsnet.stats.MessagesRecvd, 1) + } +} + +func (bsnet *impl) ConnectionManager() connmgr.ConnManager { + return bsnet.host.ConnManager() +} + +func (bsnet *impl) Stats() Stats { + return Stats{ + MessagesRecvd: atomic.LoadUint64(&bsnet.stats.MessagesRecvd), + MessagesSent: atomic.LoadUint64(&bsnet.stats.MessagesSent), + } +} + +type netNotifiee impl + +func (nn *netNotifiee) impl() *impl { + return (*impl)(nn) +} + +func (nn *netNotifiee) Connected(n network.Network, v network.Conn) { + // ignore transient connections + if v.Stat().Transient { + return + } + + nn.impl().connectEvtMgr.Connected(v.RemotePeer()) +} +func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) { + // ignore transient connections + if v.Stat().Transient { + return + } + + nn.impl().connectEvtMgr.Disconnected(v.RemotePeer()) +} +func (nn *netNotifiee) OpenedStream(n network.Network, s network.Stream) {} +func (nn *netNotifiee) ClosedStream(n network.Network, v network.Stream) {} +func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) {} +func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr) {} diff --git a/network/dms3_impl_test.go b/network/dms3_impl_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b09fc55177bd23c10794787f8eda90567837bba0 --- /dev/null +++ b/network/dms3_impl_test.go @@ -0,0 +1,602 @@ +package network_test + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/multiformats/go-multistream" + bsmsg "gitlab.dms3.io/dms3/go-bitswap/message" + pb "gitlab.dms3.io/dms3/go-bitswap/message/pb" + bsnet "gitlab.dms3.io/dms3/go-bitswap/network" + tn "gitlab.dms3.io/dms3/go-bitswap/testnet" + ds "gitlab.dms3.io/dms3/go-datastore" + blocksutil 
"gitlab.dms3.io/dms3/go-dms3-blocksutil" + mockrouting "gitlab.dms3.io/dms3/go-dms3-routing/mock" + + "gitlab.dms3.io/p2p/go-p2p-core/host" + "gitlab.dms3.io/p2p/go-p2p-core/network" + "gitlab.dms3.io/p2p/go-p2p-core/peer" + "gitlab.dms3.io/p2p/go-p2p-core/protocol" + tnet "gitlab.dms3.io/p2p/go-p2p-testing/net" + mocknet "gitlab.dms3.io/p2p/go-p2p/p2p/net/mock" +) + +// Receiver is an interface for receiving messages from the GraphSyncNetwork. +type receiver struct { + peers map[peer.ID]struct{} + messageReceived chan struct{} + connectionEvent chan bool + lastMessage bsmsg.BitSwapMessage + lastSender peer.ID + listener network.Notifiee +} + +func newReceiver() *receiver { + return &receiver{ + peers: make(map[peer.ID]struct{}), + messageReceived: make(chan struct{}), + connectionEvent: make(chan bool, 1), + } +} + +func (r *receiver) ReceiveMessage( + ctx context.Context, + sender peer.ID, + incoming bsmsg.BitSwapMessage) { + r.lastSender = sender + r.lastMessage = incoming + select { + case <-ctx.Done(): + case r.messageReceived <- struct{}{}: + } +} + +func (r *receiver) ReceiveError(err error) { +} + +func (r *receiver) PeerConnected(p peer.ID) { + r.peers[p] = struct{}{} + r.connectionEvent <- true +} + +func (r *receiver) PeerDisconnected(p peer.ID) { + delete(r.peers, p) + r.connectionEvent <- false +} + +var errMockNetErr = fmt.Errorf("network err") + +type ErrStream struct { + network.Stream + lk sync.Mutex + err error + timingOut bool +} + +type ErrHost struct { + host.Host + lk sync.Mutex + err error + timingOut bool + streams []*ErrStream +} + +func (es *ErrStream) Write(b []byte) (int, error) { + es.lk.Lock() + defer es.lk.Unlock() + + if es.err != nil { + return 0, es.err + } + if es.timingOut { + return 0, context.DeadlineExceeded + } + return es.Stream.Write(b) +} + +func (eh *ErrHost) Connect(ctx context.Context, pi peer.AddrInfo) error { + eh.lk.Lock() + defer eh.lk.Unlock() + + if eh.err != nil { + return eh.err + } + if eh.timingOut { + return context.DeadlineExceeded + } + return eh.Host.Connect(ctx, pi) +} + +func (eh *ErrHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.ID) (network.Stream, error) { + eh.lk.Lock() + defer eh.lk.Unlock() + + if eh.err != nil { + return nil, errMockNetErr + } + if eh.timingOut { + return nil, context.DeadlineExceeded + } + stream, err := eh.Host.NewStream(ctx, p, pids...) 
+ estrm := &ErrStream{Stream: stream, err: eh.err, timingOut: eh.timingOut} + + eh.streams = append(eh.streams, estrm) + return estrm, err +} + +func (eh *ErrHost) setError(err error) { + eh.lk.Lock() + defer eh.lk.Unlock() + + eh.err = err + for _, s := range eh.streams { + s.lk.Lock() + s.err = err + s.lk.Unlock() + } +} + +func (eh *ErrHost) setTimeoutState(timingOut bool) { + eh.lk.Lock() + defer eh.lk.Unlock() + + eh.timingOut = timingOut + for _, s := range eh.streams { + s.lk.Lock() + s.timingOut = timingOut + s.lk.Unlock() + } +} + +func TestMessageSendAndReceive(t *testing.T) { + // create network + ctx := context.Background() + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + mn := mocknet.New(ctx) + mr := mockrouting.NewServer() + streamNet, err := tn.StreamNet(ctx, mn, mr) + if err != nil { + t.Fatal("Unable to setup network") + } + p1 := tnet.RandIdentityOrFatal(t) + p2 := tnet.RandIdentityOrFatal(t) + + bsnet1 := streamNet.Adapter(p1) + bsnet2 := streamNet.Adapter(p2) + r1 := newReceiver() + r2 := newReceiver() + bsnet1.SetDelegate(r1) + bsnet2.SetDelegate(r2) + + err = mn.LinkAll() + if err != nil { + t.Fatal(err) + } + err = bsnet1.ConnectTo(ctx, p2.ID()) + if err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("did not connect peer") + case <-r1.connectionEvent: + } + err = bsnet2.ConnectTo(ctx, p1.ID()) + if err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("did not connect peer") + case <-r2.connectionEvent: + } + if _, ok := r1.peers[p2.ID()]; !ok { + t.Fatal("did to connect to correct peer") + } + if _, ok := r2.peers[p1.ID()]; !ok { + t.Fatal("did to connect to correct peer") + } + blockGenerator := blocksutil.NewBlockGenerator() + block1 := blockGenerator.Next() + block2 := blockGenerator.Next() + sent := bsmsg.New(false) + sent.AddEntry(block1.Cid(), 1, pb.Message_Wantlist_Block, true) + sent.AddBlock(block2) + + err = bsnet1.SendMessage(ctx, p2.ID(), sent) + if err != nil { + t.Fatal(err) + } + + select { + case <-ctx.Done(): + t.Fatal("did not receive message sent") + case <-r2.messageReceived: + } + + sender := r2.lastSender + if sender != p1.ID() { + t.Fatal("received message from wrong node") + } + + received := r2.lastMessage + + sentWants := sent.Wantlist() + if len(sentWants) != 1 { + t.Fatal("Did not add want to sent message") + } + sentWant := sentWants[0] + receivedWants := received.Wantlist() + if len(receivedWants) != 1 { + t.Fatal("Did not add want to received message") + } + receivedWant := receivedWants[0] + if receivedWant.Cid != sentWant.Cid || + receivedWant.Priority != sentWant.Priority || + receivedWant.Cancel != sentWant.Cancel { + t.Fatal("Sent message wants did not match received message wants") + } + sentBlocks := sent.Blocks() + if len(sentBlocks) != 1 { + t.Fatal("Did not add block to sent message") + } + sentBlock := sentBlocks[0] + receivedBlocks := received.Blocks() + if len(receivedBlocks) != 1 { + t.Fatal("Did not add response to received message") + } + receivedBlock := receivedBlocks[0] + if receivedBlock.Cid() != sentBlock.Cid() { + t.Fatal("Sent message blocks did not match received message blocks") + } +} + +func prepareNetwork(t *testing.T, ctx context.Context, p1 tnet.Identity, r1 *receiver, p2 tnet.Identity, r2 *receiver) (*ErrHost, bsnet.BitSwapNetwork, *ErrHost, bsnet.BitSwapNetwork, bsmsg.BitSwapMessage) { + // create network + mn := mocknet.New(ctx) + mr := mockrouting.NewServer() + + // Host 1 + h1, err := mn.AddPeer(p1.PrivateKey(), p1.Address()) + 
if err != nil { + t.Fatal(err) + } + eh1 := &ErrHost{Host: h1} + routing1 := mr.ClientWithDatastore(context.TODO(), p1, ds.NewMapDatastore()) + bsnet1 := bsnet.NewFromDms3Host(eh1, routing1) + bsnet1.SetDelegate(r1) + if r1.listener != nil { + eh1.Network().Notify(r1.listener) + } + + // Host 2 + h2, err := mn.AddPeer(p2.PrivateKey(), p2.Address()) + if err != nil { + t.Fatal(err) + } + eh2 := &ErrHost{Host: h2} + routing2 := mr.ClientWithDatastore(context.TODO(), p2, ds.NewMapDatastore()) + bsnet2 := bsnet.NewFromDms3Host(eh2, routing2) + bsnet2.SetDelegate(r2) + if r2.listener != nil { + eh2.Network().Notify(r2.listener) + } + + // Networking + err = mn.LinkAll() + if err != nil { + t.Fatal(err) + } + err = bsnet1.ConnectTo(ctx, p2.ID()) + if err != nil { + t.Fatal(err) + } + isConnected := <-r1.connectionEvent + if !isConnected { + t.Fatal("Expected connect event") + } + + err = bsnet2.ConnectTo(ctx, p1.ID()) + if err != nil { + t.Fatal(err) + } + + blockGenerator := blocksutil.NewBlockGenerator() + block1 := blockGenerator.Next() + msg := bsmsg.New(false) + msg.AddEntry(block1.Cid(), 1, pb.Message_Wantlist_Block, true) + + return eh1, bsnet1, eh2, bsnet2, msg +} + +func TestMessageResendAfterError(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + p1 := tnet.RandIdentityOrFatal(t) + r1 := newReceiver() + p2 := tnet.RandIdentityOrFatal(t) + r2 := newReceiver() + + eh, bsnet1, _, _, msg := prepareNetwork(t, ctx, p1, r1, p2, r2) + + testSendErrorBackoff := 100 * time.Millisecond + ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{ + MaxRetries: 3, + SendTimeout: 100 * time.Millisecond, + SendErrorBackoff: testSendErrorBackoff, + }) + if err != nil { + t.Fatal(err) + } + defer ms.Close() + + // Return an error from the networking layer the next time we try to send + // a message + eh.setError(errMockNetErr) + + go func() { + time.Sleep(testSendErrorBackoff / 2) + // Stop throwing errors so that the following attempt to send succeeds + eh.setError(nil) + }() + + // Send message with retries, first one should fail, then subsequent + // message should succeed + err = ms.SendMsg(ctx, msg) + if err != nil { + t.Fatal(err) + } + + select { + case <-ctx.Done(): + t.Fatal("did not receive message sent") + case <-r2.messageReceived: + } +} + +func TestMessageSendTimeout(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + p1 := tnet.RandIdentityOrFatal(t) + r1 := newReceiver() + p2 := tnet.RandIdentityOrFatal(t) + r2 := newReceiver() + + eh, bsnet1, _, _, msg := prepareNetwork(t, ctx, p1, r1, p2, r2) + + ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{ + MaxRetries: 3, + SendTimeout: 100 * time.Millisecond, + SendErrorBackoff: 100 * time.Millisecond, + }) + if err != nil { + t.Fatal(err) + } + defer ms.Close() + + // Return a DeadlineExceeded error from the networking layer the next time we try to + // send a message + eh.setTimeoutState(true) + + // Send message with retries, all attempts should fail + err = ms.SendMsg(ctx, msg) + if err == nil { + t.Fatal("Expected error from SednMsg") + } + + select { + case <-time.After(500 * time.Millisecond): + t.Fatal("Did not receive disconnect event") + case isConnected := <-r1.connectionEvent: + if isConnected { + t.Fatal("Expected disconnect event (got connect event)") + } + } +} + +func TestMessageSendNotSupportedResponse(t *testing.T) { + ctx, cancel := 
context.WithTimeout(context.Background(), time.Second) + defer cancel() + + p1 := tnet.RandIdentityOrFatal(t) + r1 := newReceiver() + p2 := tnet.RandIdentityOrFatal(t) + r2 := newReceiver() + + eh, bsnet1, _, _, _ := prepareNetwork(t, ctx, p1, r1, p2, r2) + + eh.setError(multistream.ErrNotSupported) + ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{ + MaxRetries: 3, + SendTimeout: 100 * time.Millisecond, + SendErrorBackoff: 100 * time.Millisecond, + }) + if err == nil { + ms.Close() + t.Fatal("Expected ErrNotSupported") + } + + select { + case <-time.After(500 * time.Millisecond): + t.Fatal("Did not receive disconnect event") + case isConnected := <-r1.connectionEvent: + if isConnected { + t.Fatal("Expected disconnect event (got connect event)") + } + } +} + +func TestSupportsHave(t *testing.T) { + ctx := context.Background() + mn := mocknet.New(ctx) + mr := mockrouting.NewServer() + streamNet, err := tn.StreamNet(ctx, mn, mr) + if err != nil { + t.Fatalf("Unable to setup network: %s", err) + } + + type testCase struct { + proto protocol.ID + expSupportsHave bool + } + + testCases := []testCase{ + testCase{bsnet.ProtocolBitswap, true}, + testCase{bsnet.ProtocolBitswapOneOne, false}, + testCase{bsnet.ProtocolBitswapOneZero, false}, + testCase{bsnet.ProtocolBitswapNoVers, false}, + } + + for _, tc := range testCases { + p1 := tnet.RandIdentityOrFatal(t) + bsnet1 := streamNet.Adapter(p1) + bsnet1.SetDelegate(newReceiver()) + + p2 := tnet.RandIdentityOrFatal(t) + bsnet2 := streamNet.Adapter(p2, bsnet.SupportedProtocols([]protocol.ID{tc.proto})) + bsnet2.SetDelegate(newReceiver()) + + err = mn.LinkAll() + if err != nil { + t.Fatal(err) + } + + senderCurrent, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{}) + if err != nil { + t.Fatal(err) + } + defer senderCurrent.Close() + + if senderCurrent.SupportsHave() != tc.expSupportsHave { + t.Fatal("Expected sender HAVE message support", tc.proto, tc.expSupportsHave) + } + } +} + +func testNetworkCounters(t *testing.T, n1 int, n2 int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + p1 := tnet.RandIdentityOrFatal(t) + r1 := newReceiver() + p2 := tnet.RandIdentityOrFatal(t) + r2 := newReceiver() + + var wg1, wg2 sync.WaitGroup + r1.listener = &network.NotifyBundle{ + OpenedStreamF: func(n network.Network, s network.Stream) { + wg1.Add(1) + }, + ClosedStreamF: func(n network.Network, s network.Stream) { + wg1.Done() + }, + } + r2.listener = &network.NotifyBundle{ + OpenedStreamF: func(n network.Network, s network.Stream) { + wg2.Add(1) + }, + ClosedStreamF: func(n network.Network, s network.Stream) { + wg2.Done() + }, + } + _, bsnet1, _, bsnet2, msg := prepareNetwork(t, ctx, p1, r1, p2, r2) + + for n := 0; n < n1; n++ { + ctx, cancel := context.WithTimeout(ctx, time.Second) + err := bsnet1.SendMessage(ctx, p2.ID(), msg) + if err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("p2 did not receive message sent") + case <-r2.messageReceived: + for j := 0; j < 2; j++ { + err := bsnet2.SendMessage(ctx, p1.ID(), msg) + if err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("p1 did not receive message sent") + case <-r1.messageReceived: + } + } + } + cancel() + } + + if n2 > 0 { + ms, err := bsnet1.NewMessageSender(ctx, p2.ID(), &bsnet.MessageSenderOpts{}) + if err != nil { + t.Fatal(err) + } + defer ms.Close() + for n := 0; n < n2; n++ { + ctx, cancel := context.WithTimeout(ctx, time.Second) + err = ms.SendMsg(ctx, msg) + if err != nil 
{ + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("p2 did not receive message sent") + case <-r2.messageReceived: + for j := 0; j < 2; j++ { + err := bsnet2.SendMessage(ctx, p1.ID(), msg) + if err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("p1 did not receive message sent") + case <-r1.messageReceived: + } + } + } + cancel() + } + ms.Close() + } + + // Wait until all streams are closed and MessagesRecvd counters + // updated. + ctxto, cancelto := context.WithTimeout(ctx, 5*time.Second) + defer cancelto() + ctxwait, cancelwait := context.WithCancel(ctx) + defer cancelwait() + go func() { + wg1.Wait() + wg2.Wait() + cancelwait() + }() + select { + case <-ctxto.Done(): + t.Fatal("network streams closing timed out") + case <-ctxwait.Done(): + } + + if bsnet1.Stats().MessagesSent != uint64(n1+n2) { + t.Fatal(fmt.Errorf("expected %d sent messages, got %d", n1+n2, bsnet1.Stats().MessagesSent)) + } + + if bsnet2.Stats().MessagesRecvd != uint64(n1+n2) { + t.Fatal(fmt.Errorf("expected %d received messages, got %d", n1+n2, bsnet2.Stats().MessagesRecvd)) + } + + if bsnet1.Stats().MessagesRecvd != 2*uint64(n1+n2) { + t.Fatal(fmt.Errorf("expected %d received reply messages, got %d", 2*(n1+n2), bsnet1.Stats().MessagesRecvd)) + } +} + +func TestNetworkCounters(t *testing.T) { + for n := 0; n < 11; n++ { + testNetworkCounters(t, 10-n, n) + } +} diff --git a/network/interface.go b/network/interface.go new file mode 100644 index 0000000000000000000000000000000000000000..eecba4efc083a85f6e142a87b19f505a8a2c4ab9 --- /dev/null +++ b/network/interface.go @@ -0,0 +1,110 @@ +package network + +import ( + "context" + "time" + + bsmsg "gitlab.dms3.io/dms3/go-bitswap/message" + + cid "gitlab.dms3.io/dms3/go-cid" + + "gitlab.dms3.io/p2p/go-p2p-core/connmgr" + "gitlab.dms3.io/p2p/go-p2p-core/peer" + "gitlab.dms3.io/p2p/go-p2p-core/protocol" + "gitlab.dms3.io/p2p/go-p2p/p2p/protocol/ping" +) + +var ( + // ProtocolBitswapNoVers is equivalent to the legacy bitswap protocol + ProtocolBitswapNoVers protocol.ID = "/dms3/bitswap" + // ProtocolBitswapOneZero is the prefix for the legacy bitswap protocol + ProtocolBitswapOneZero protocol.ID = "/dms3/bitswap/1.0.0" + // ProtocolBitswapOneOne is the the prefix for version 1.1.0 + ProtocolBitswapOneOne protocol.ID = "/dms3/bitswap/1.1.0" + // ProtocolBitswap is the current version of the bitswap protocol: 1.2.0 + ProtocolBitswap protocol.ID = "/dms3/bitswap/1.2.0" +) + +// BitSwapNetwork provides network connectivity for BitSwap sessions. +type BitSwapNetwork interface { + Self() peer.ID + + // SendMessage sends a BitSwap message to a peer. + SendMessage( + context.Context, + peer.ID, + bsmsg.BitSwapMessage) error + + // SetDelegate registers the Reciver to handle messages received from the + // network. 
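+	// The same Receiver also serves as the ConnectionListener: SetDelegate in
+	// dms3_impl.go hands it to newConnectEventManager, so its PeerConnected /
+	// PeerDisconnected methods are invoked as peers connect and disconnect
+	// (and a peer marked unresponsive is reported as disconnected).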
+ SetDelegate(Receiver) + + ConnectTo(context.Context, peer.ID) error + DisconnectFrom(context.Context, peer.ID) error + + NewMessageSender(context.Context, peer.ID, *MessageSenderOpts) (MessageSender, error) + + ConnectionManager() connmgr.ConnManager + + Stats() Stats + + Routing + + Pinger +} + +// MessageSender is an interface for sending a series of messages over the bitswap +// network +type MessageSender interface { + SendMsg(context.Context, bsmsg.BitSwapMessage) error + Close() error + Reset() error + // Indicates whether the remote peer supports HAVE / DONT_HAVE messages + SupportsHave() bool +} + +type MessageSenderOpts struct { + MaxRetries int + SendTimeout time.Duration + SendErrorBackoff time.Duration +} + +// Receiver is an interface that can receive messages from the BitSwapNetwork. +type Receiver interface { + ReceiveMessage( + ctx context.Context, + sender peer.ID, + incoming bsmsg.BitSwapMessage) + + ReceiveError(error) + + // Connected/Disconnected warns bitswap about peer connections. + PeerConnected(peer.ID) + PeerDisconnected(peer.ID) +} + +// Routing is an interface to providing and finding providers on a bitswap +// network. +type Routing interface { + // FindProvidersAsync returns a channel of providers for the given key. + FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID + + // Provide provides the key to the network. + Provide(context.Context, cid.Cid) error +} + +// Pinger is an interface to ping a peer and get the average latency of all pings +type Pinger interface { + // Ping a peer + Ping(context.Context, peer.ID) ping.Result + // Get the average latency of all pings + Latency(peer.ID) time.Duration +} + +// Stats is a container for statistics about the bitswap network +// the numbers inside are specific to bitswap, and not any other protocols +// using the same underlying network. 
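+// MessagesSent is incremented once per message successfully written to a
+// stream (msgToStream in dms3_impl.go) and MessagesRecvd once per message
+// decoded from an inbound stream (handleNewStream); Stats() reads both with
+// atomic loads, so it is safe to call while traffic is flowing.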
+type Stats struct { + MessagesSent uint64 + MessagesRecvd uint64 +} diff --git a/network/options.go b/network/options.go new file mode 100644 index 0000000000000000000000000000000000000000..bce6b86bc49754af97def7488808ce8e0764e70c --- /dev/null +++ b/network/options.go @@ -0,0 +1,22 @@ +package network + +import "gitlab.dms3.io/p2p/go-p2p-core/protocol" + +type NetOpt func(*Settings) + +type Settings struct { + ProtocolPrefix protocol.ID + SupportedProtocols []protocol.ID +} + +func Prefix(prefix protocol.ID) NetOpt { + return func(settings *Settings) { + settings.ProtocolPrefix = prefix + } +} + +func SupportedProtocols(protos []protocol.ID) NetOpt { + return func(settings *Settings) { + settings.SupportedProtocols = protos + } +} diff --git a/stat.go b/stat.go new file mode 100644 index 0000000000000000000000000000000000000000..216d6cd149108f53e5936ee019f2252cb2c86ea4 --- /dev/null +++ b/stat.go @@ -0,0 +1,48 @@ +package bitswap + +import ( + "sort" + + cid "gitlab.dms3.io/dms3/go-cid" +) + +// Stat is a struct that provides various statistics on bitswap operations +type Stat struct { + ProvideBufLen int + Wantlist []cid.Cid + Peers []string + BlocksReceived uint64 + DataReceived uint64 + BlocksSent uint64 + DataSent uint64 + DupBlksReceived uint64 + DupDataReceived uint64 + MessagesReceived uint64 +} + +// Stat returns aggregated statistics about bitswap operations +func (bs *Bitswap) Stat() (*Stat, error) { + st := new(Stat) + st.ProvideBufLen = len(bs.newBlocks) + st.Wantlist = bs.GetWantlist() + bs.counterLk.Lock() + c := bs.counters + st.BlocksReceived = c.blocksRecvd + st.DupBlksReceived = c.dupBlocksRecvd + st.DupDataReceived = c.dupDataRecvd + st.BlocksSent = c.blocksSent + st.DataSent = c.dataSent + st.DataReceived = c.dataRecvd + st.MessagesReceived = c.messagesRecvd + bs.counterLk.Unlock() + + peers := bs.engine.Peers() + st.Peers = make([]string, 0, len(peers)) + + for _, p := range peers { + st.Peers = append(st.Peers, p.Pretty()) + } + sort.Strings(st.Peers) + + return st, nil +} diff --git a/testinstance/testinstance.go b/testinstance/testinstance.go new file mode 100644 index 0000000000000000000000000000000000000000..94b6f1a1cc188a6ea21fd37de51fa7595cc2fc35 --- /dev/null +++ b/testinstance/testinstance.go @@ -0,0 +1,132 @@ +package testsession + +import ( + "context" + "time" + + bitswap "gitlab.dms3.io/dms3/go-bitswap" + bsnet "gitlab.dms3.io/dms3/go-bitswap/network" + tn "gitlab.dms3.io/dms3/go-bitswap/testnet" + ds "gitlab.dms3.io/dms3/go-datastore" + delayed "gitlab.dms3.io/dms3/go-datastore/delayed" + ds_sync "gitlab.dms3.io/dms3/go-datastore/sync" + blockstore "gitlab.dms3.io/dms3/go-dms3-blockstore" + delay "gitlab.dms3.io/dms3/go-dms3-delay" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" + p2ptestutil "gitlab.dms3.io/p2p/go-p2p-netutil" + tnet "gitlab.dms3.io/p2p/go-p2p-testing/net" +) + +// NewTestInstanceGenerator generates a new InstanceGenerator for the given +// testnet +func NewTestInstanceGenerator(net tn.Network, netOptions []bsnet.NetOpt, bsOptions []bitswap.Option) InstanceGenerator { + ctx, cancel := context.WithCancel(context.Background()) + return InstanceGenerator{ + net: net, + seq: 0, + ctx: ctx, // TODO take ctx as param to Next, Instances + cancel: cancel, + bsOptions: bsOptions, + netOptions: netOptions, + } +} + +// InstanceGenerator generates new test instances of bitswap+dependencies +type InstanceGenerator struct { + seq int + net tn.Network + ctx context.Context + cancel context.CancelFunc + bsOptions []bitswap.Option + netOptions 
[]bsnet.NetOpt +} + +// Close closes the clobal context, shutting down all test instances +func (g *InstanceGenerator) Close() error { + g.cancel() + return nil // for Closer interface +} + +// Next generates a new instance of bitswap + dependencies +func (g *InstanceGenerator) Next() Instance { + g.seq++ + p, err := p2ptestutil.RandTestBogusIdentity() + if err != nil { + panic("FIXME") // TODO change signature + } + return NewInstance(g.ctx, g.net, p, g.netOptions, g.bsOptions) +} + +// Instances creates N test instances of bitswap + dependencies and connects +// them to each other +func (g *InstanceGenerator) Instances(n int) []Instance { + var instances []Instance + for j := 0; j < n; j++ { + inst := g.Next() + instances = append(instances, inst) + } + ConnectInstances(instances) + return instances +} + +// ConnectInstances connects the given instances to each other +func ConnectInstances(instances []Instance) { + for i, inst := range instances { + for j := i + 1; j < len(instances); j++ { + oinst := instances[j] + err := inst.Adapter.ConnectTo(context.Background(), oinst.Peer) + if err != nil { + panic(err.Error()) + } + } + } +} + +// Instance is a test instance of bitswap + dependencies for integration testing +type Instance struct { + Peer peer.ID + Exchange *bitswap.Bitswap + blockstore blockstore.Blockstore + Adapter bsnet.BitSwapNetwork + blockstoreDelay delay.D +} + +// Blockstore returns the block store for this test instance +func (i *Instance) Blockstore() blockstore.Blockstore { + return i.blockstore +} + +// SetBlockstoreLatency customizes the artificial delay on receiving blocks +// from a blockstore test instance. +func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { + return i.blockstoreDelay.Set(t) +} + +// NewInstance creates a test bitswap instance. +// +// NB: It's easy make mistakes by providing the same peer ID to two different +// instances. To safeguard, use the InstanceGenerator to generate instances. It's +// just a much better idea. +func NewInstance(ctx context.Context, net tn.Network, p tnet.Identity, netOptions []bsnet.NetOpt, bsOptions []bitswap.Option) Instance { + bsdelay := delay.Fixed(0) + + adapter := net.Adapter(p, netOptions...) + dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) + + bstore, err := blockstore.CachedBlockstore(ctx, + blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)), + blockstore.DefaultCacheOpts()) + if err != nil { + panic(err.Error()) // FIXME perhaps change signature and return error. + } + + bs := bitswap.New(ctx, adapter, bstore, bsOptions...).(*bitswap.Bitswap) + + return Instance{ + Adapter: adapter, + Peer: p.ID(), + Exchange: bs, + blockstore: bstore, + blockstoreDelay: bsdelay, + } +} diff --git a/testnet/interface.go b/testnet/interface.go new file mode 100644 index 0000000000000000000000000000000000000000..5e51909485f5435b6182d165796826f02de2a63b --- /dev/null +++ b/testnet/interface.go @@ -0,0 +1,16 @@ +package bitswap + +import ( + bsnet "gitlab.dms3.io/dms3/go-bitswap/network" + + "gitlab.dms3.io/p2p/go-p2p-core/peer" + tnet "gitlab.dms3.io/p2p/go-p2p-testing/net" +) + +// Network is an interface for generating bitswap network interfaces +// based on a test network. 
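+// A sketch of how tests in this module obtain one (values illustrative, t is
+// the *testing.T):
+//
+//	vnet := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(10*time.Millisecond))
+//	nd := vnet.Adapter(tnet.RandIdentityOrFatal(t))
+//	nd.SetDelegate(r) // r implements bsnet.Receiver
+//
+// StreamNet (peernet.go) provides the same interface on top of a p2p mocknet,
+// and the testinstance package layers full bitswap nodes on top of either:
+// ig := NewTestInstanceGenerator(vnet, nil, nil); ig.Instances(2) returns two
+// connected instances.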
+type Network interface { + Adapter(tnet.Identity, ...bsnet.NetOpt) bsnet.BitSwapNetwork + + HasPeer(peer.ID) bool +} diff --git a/testnet/internet_latency_delay_generator.go b/testnet/internet_latency_delay_generator.go new file mode 100644 index 0000000000000000000000000000000000000000..2908194d1ae9c926e04941e6696b6fda09629690 --- /dev/null +++ b/testnet/internet_latency_delay_generator.go @@ -0,0 +1,63 @@ +package bitswap + +import ( + "math/rand" + "time" + + delay "gitlab.dms3.io/dms3/go-dms3-delay" +) + +var sharedRNG = rand.New(rand.NewSource(time.Now().UnixNano())) + +// InternetLatencyDelayGenerator generates three clusters of delays, +// typical of the type of peers you would encounter on the interenet. +// Given a base delay time T, the wait time generated will be either: +// 1. A normalized distribution around the base time +// 2. A normalized distribution around the base time plus a "medium" delay +// 3. A normalized distribution around the base time plus a "large" delay +// The size of the medium & large delays are determined when the generator +// is constructed, as well as the relative percentages with which delays fall +// into each of the three different clusters, and the standard deviation for +// the normalized distribution. +// This can be used to generate a number of scenarios typical of latency +// distribution among peers on the internet. +func InternetLatencyDelayGenerator( + mediumDelay time.Duration, + largeDelay time.Duration, + percentMedium float64, + percentLarge float64, + std time.Duration, + rng *rand.Rand) delay.Generator { + if rng == nil { + rng = sharedRNG + } + + return &internetLatencyDelayGenerator{ + mediumDelay: mediumDelay, + largeDelay: largeDelay, + percentLarge: percentLarge, + percentMedium: percentMedium, + std: std, + rng: rng, + } +} + +type internetLatencyDelayGenerator struct { + mediumDelay time.Duration + largeDelay time.Duration + percentLarge float64 + percentMedium float64 + std time.Duration + rng *rand.Rand +} + +func (d *internetLatencyDelayGenerator) NextWaitTime(t time.Duration) time.Duration { + clusterDistribution := d.rng.Float64() + baseDelay := time.Duration(d.rng.NormFloat64()*float64(d.std)) + t + if clusterDistribution < d.percentLarge { + return baseDelay + d.largeDelay + } else if clusterDistribution < d.percentMedium+d.percentLarge { + return baseDelay + d.mediumDelay + } + return baseDelay +} diff --git a/testnet/internet_latency_delay_generator_test.go b/testnet/internet_latency_delay_generator_test.go new file mode 100644 index 0000000000000000000000000000000000000000..dcd6a92b5322d8e9845c80d7bae8e16a644fbdc1 --- /dev/null +++ b/testnet/internet_latency_delay_generator_test.go @@ -0,0 +1,69 @@ +package bitswap + +import ( + "math" + "math/rand" + "testing" + "time" +) + +const testSeed = 99 + +func TestInternetLatencyDelayNextWaitTimeDistribution(t *testing.T) { + initialValue := 1000 * time.Millisecond + deviation := 100 * time.Millisecond + mediumDelay := 1000 * time.Millisecond + largeDelay := 3000 * time.Millisecond + percentMedium := 0.2 + percentLarge := 0.4 + buckets := make(map[string]int) + internetLatencyDistributionDelay := InternetLatencyDelayGenerator( + mediumDelay, + largeDelay, + percentMedium, + percentLarge, + deviation, + rand.New(rand.NewSource(testSeed))) + + buckets["fast"] = 0 + buckets["medium"] = 0 + buckets["slow"] = 0 + buckets["outside_1_deviation"] = 0 + + // strategy here is rather than mock randomness, just use enough samples to + // get approximately the distribution you'd expect + 
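+	// Concretely: NextWaitTime adds largeDelay when the uniform draw is below
+	// percentLarge (0.4), mediumDelay when it falls in [0.4, 0.6), and nothing
+	// otherwise, on top of normal jitter with stddev `deviation` around the
+	// base time. So over 10000 samples roughly 40% should land in the "slow"
+	// bucket, 20% in "medium" and 40% in "fast", which is what the checks
+	// below assert to within 0.1.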
for i := 0; i < 10000; i++ { + next := internetLatencyDistributionDelay.NextWaitTime(initialValue) + if math.Abs((next - initialValue).Seconds()) <= deviation.Seconds() { + buckets["fast"]++ + } else if math.Abs((next - initialValue - mediumDelay).Seconds()) <= deviation.Seconds() { + buckets["medium"]++ + } else if math.Abs((next - initialValue - largeDelay).Seconds()) <= deviation.Seconds() { + buckets["slow"]++ + } else { + buckets["outside_1_deviation"]++ + } + } + totalInOneDeviation := float64(10000 - buckets["outside_1_deviation"]) + oneDeviationPercentage := totalInOneDeviation / 10000 + fastPercentageResult := float64(buckets["fast"]) / totalInOneDeviation + mediumPercentageResult := float64(buckets["medium"]) / totalInOneDeviation + slowPercentageResult := float64(buckets["slow"]) / totalInOneDeviation + + // see 68-95-99 rule for normal distributions + if math.Abs(oneDeviationPercentage-0.6827) >= 0.1 { + t.Fatal("Failed to distribute values normally based on standard deviation") + } + + if math.Abs(fastPercentageResult+percentMedium+percentLarge-1) >= 0.1 { + t.Fatal("Incorrect percentage of values distributed around fast delay time") + } + + if math.Abs(mediumPercentageResult-percentMedium) >= 0.1 { + t.Fatal("Incorrect percentage of values distributed around medium delay time") + } + + if math.Abs(slowPercentageResult-percentLarge) >= 0.1 { + t.Fatal("Incorrect percentage of values distributed around slow delay time") + } +} diff --git a/testnet/network_test.go b/testnet/network_test.go new file mode 100644 index 0000000000000000000000000000000000000000..044e98db2160b5d0ee17441bf7fe06a3e37608f6 --- /dev/null +++ b/testnet/network_test.go @@ -0,0 +1,102 @@ +package bitswap + +import ( + "context" + "sync" + "testing" + + bsmsg "gitlab.dms3.io/dms3/go-bitswap/message" + bsnet "gitlab.dms3.io/dms3/go-bitswap/network" + + blocks "gitlab.dms3.io/dms3/go-block-format" + delay "gitlab.dms3.io/dms3/go-dms3-delay" + mockrouting "gitlab.dms3.io/dms3/go-dms3-routing/mock" + + "gitlab.dms3.io/p2p/go-p2p-core/peer" + tnet "gitlab.dms3.io/p2p/go-p2p-testing/net" +) + +func TestSendMessageAsyncButWaitForResponse(t *testing.T) { + net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) + responderPeer := tnet.RandIdentityOrFatal(t) + waiter := net.Adapter(tnet.RandIdentityOrFatal(t)) + responder := net.Adapter(responderPeer) + + var wg sync.WaitGroup + + wg.Add(1) + + expectedStr := "received async" + + responder.SetDelegate(lambda(func( + ctx context.Context, + fromWaiter peer.ID, + msgFromWaiter bsmsg.BitSwapMessage) { + + msgToWaiter := bsmsg.New(true) + msgToWaiter.AddBlock(blocks.NewBlock([]byte(expectedStr))) + err := waiter.SendMessage(ctx, fromWaiter, msgToWaiter) + if err != nil { + t.Error(err) + } + })) + + waiter.SetDelegate(lambda(func( + ctx context.Context, + fromResponder peer.ID, + msgFromResponder bsmsg.BitSwapMessage) { + + // TODO assert that this came from the correct peer and that the message contents are as expected + ok := false + for _, b := range msgFromResponder.Blocks() { + if string(b.RawData()) == expectedStr { + wg.Done() + ok = true + } + } + + if !ok { + t.Fatal("Message not received from the responder") + } + })) + + messageSentAsync := bsmsg.New(true) + messageSentAsync.AddBlock(blocks.NewBlock([]byte("data"))) + errSending := waiter.SendMessage( + context.Background(), responderPeer.ID(), messageSentAsync) + if errSending != nil { + t.Fatal(errSending) + } + + wg.Wait() // until waiter delegate function is executed +} + +type receiverFunc func(ctx 
context.Context, p peer.ID, + incoming bsmsg.BitSwapMessage) + +// lambda returns a Receiver instance given a receiver function +func lambda(f receiverFunc) bsnet.Receiver { + return &lambdaImpl{ + f: f, + } +} + +type lambdaImpl struct { + f func(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) +} + +func (lam *lambdaImpl) ReceiveMessage(ctx context.Context, + p peer.ID, incoming bsmsg.BitSwapMessage) { + lam.f(ctx, p, incoming) +} + +func (lam *lambdaImpl) ReceiveError(err error) { + // TODO log error +} + +func (lam *lambdaImpl) PeerConnected(p peer.ID) { + // TODO +} +func (lam *lambdaImpl) PeerDisconnected(peer.ID) { + // TODO +} diff --git a/testnet/peernet.go b/testnet/peernet.go new file mode 100644 index 0000000000000000000000000000000000000000..44c1a685231bae13231588a1830359932df52b8d --- /dev/null +++ b/testnet/peernet.go @@ -0,0 +1,44 @@ +package bitswap + +import ( + "context" + + bsnet "gitlab.dms3.io/dms3/go-bitswap/network" + + ds "gitlab.dms3.io/dms3/go-datastore" + mockrouting "gitlab.dms3.io/dms3/go-dms3-routing/mock" + + "gitlab.dms3.io/p2p/go-p2p-core/peer" + tnet "gitlab.dms3.io/p2p/go-p2p-testing/net" + mockpeernet "gitlab.dms3.io/p2p/go-p2p/p2p/net/mock" +) + +type peernet struct { + mockpeernet.Mocknet + routingserver mockrouting.Server +} + +// StreamNet is a testnet that uses p2p's MockNet +func StreamNet(ctx context.Context, net mockpeernet.Mocknet, rs mockrouting.Server) (Network, error) { + return &peernet{net, rs}, nil +} + +func (pn *peernet) Adapter(p tnet.Identity, opts ...bsnet.NetOpt) bsnet.BitSwapNetwork { + client, err := pn.Mocknet.AddPeer(p.PrivateKey(), p.Address()) + if err != nil { + panic(err.Error()) + } + routing := pn.routingserver.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore()) + return bsnet.NewFromDms3Host(client, routing, opts...) +} + +func (pn *peernet) HasPeer(p peer.ID) bool { + for _, member := range pn.Mocknet.Peers() { + if p == member { + return true + } + } + return false +} + +var _ Network = (*peernet)(nil) diff --git a/testnet/rate_limit_generators.go b/testnet/rate_limit_generators.go new file mode 100644 index 0000000000000000000000000000000000000000..2c4a1cd56349d40872e393c7a257b84209999ac5 --- /dev/null +++ b/testnet/rate_limit_generators.go @@ -0,0 +1,42 @@ +package bitswap + +import ( + "math/rand" +) + +type fixedRateLimitGenerator struct { + rateLimit float64 +} + +// FixedRateLimitGenerator returns a rate limit generatoe that always generates +// the specified rate limit in bytes/sec. +func FixedRateLimitGenerator(rateLimit float64) RateLimitGenerator { + return &fixedRateLimitGenerator{rateLimit} +} + +func (rateLimitGenerator *fixedRateLimitGenerator) NextRateLimit() float64 { + return rateLimitGenerator.rateLimit +} + +type variableRateLimitGenerator struct { + rateLimit float64 + std float64 + rng *rand.Rand +} + +// VariableRateLimitGenerator makes rate limites that following a normal distribution. 
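+// For example (illustrative numbers, not defaults),
+// VariableRateLimitGenerator(500000, 50000, nil) yields per-peer rate limits
+// in bytes/sec drawn from a normal distribution with mean 500000 and standard
+// deviation 50000, falling back to the package-level sharedRNG when rng is
+// nil; FixedRateLimitGenerator(500000) always returns exactly 500000.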
+func VariableRateLimitGenerator(rateLimit float64, std float64, rng *rand.Rand) RateLimitGenerator { + if rng == nil { + rng = sharedRNG + } + + return &variableRateLimitGenerator{ + std: std, + rng: rng, + rateLimit: rateLimit, + } +} + +func (rateLimitGenerator *variableRateLimitGenerator) NextRateLimit() float64 { + return rateLimitGenerator.rng.NormFloat64()*rateLimitGenerator.std + rateLimitGenerator.rateLimit +} diff --git a/testnet/virtual.go b/testnet/virtual.go new file mode 100644 index 0000000000000000000000000000000000000000..ff15d96c023f8e913cf74594f37acdf3066a55af --- /dev/null +++ b/testnet/virtual.go @@ -0,0 +1,400 @@ +package bitswap + +import ( + "context" + "errors" + "sort" + "sync" + "sync/atomic" + "time" + + bsmsg "gitlab.dms3.io/dms3/go-bitswap/message" + bsnet "gitlab.dms3.io/dms3/go-bitswap/network" + + cid "gitlab.dms3.io/dms3/go-cid" + delay "gitlab.dms3.io/dms3/go-dms3-delay" + mockrouting "gitlab.dms3.io/dms3/go-dms3-routing/mock" + + "gitlab.dms3.io/p2p/go-p2p-core/connmgr" + "gitlab.dms3.io/p2p/go-p2p-core/peer" + protocol "gitlab.dms3.io/p2p/go-p2p-core/protocol" + "gitlab.dms3.io/p2p/go-p2p-core/routing" + tnet "gitlab.dms3.io/p2p/go-p2p-testing/net" + mocknet "gitlab.dms3.io/p2p/go-p2p/p2p/net/mock" + "gitlab.dms3.io/p2p/go-p2p/p2p/protocol/ping" +) + +// VirtualNetwork generates a new testnet instance - a fake network that +// is used to simulate sending messages. +func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { + return &network{ + latencies: make(map[peer.ID]map[peer.ID]time.Duration), + clients: make(map[peer.ID]*receiverQueue), + delay: d, + routingserver: rs, + isRateLimited: false, + rateLimitGenerator: nil, + conns: make(map[string]struct{}), + } +} + +// RateLimitGenerator is an interface for generating rate limits across peers +type RateLimitGenerator interface { + NextRateLimit() float64 +} + +// RateLimitedVirtualNetwork generates a testnet instance where nodes are rate +// limited in the upload/download speed. 
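+//
+// A sketch of how this might be wired up in a test (the fixtures mirror those
+// in network_test.go; the 1<<20 bytes/sec cap is an assumed example value):
+//
+//	net := RateLimitedVirtualNetwork(mockrouting.NewServer(), delay.Fixed(0), FixedRateLimitGenerator(1<<20))
+//	adapter := net.Adapter(tnet.RandIdentityOrFatal(t))
+//	_ = adapter
+//
+// Each (sender, receiver) pair gets its own mocknet.RateLimiter, so the
+// bandwidth delay is applied per link, on top of the per-link latency.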
+func RateLimitedVirtualNetwork(rs mockrouting.Server, d delay.D, rateLimitGenerator RateLimitGenerator) Network { + return &network{ + latencies: make(map[peer.ID]map[peer.ID]time.Duration), + rateLimiters: make(map[peer.ID]map[peer.ID]*mocknet.RateLimiter), + clients: make(map[peer.ID]*receiverQueue), + delay: d, + routingserver: rs, + isRateLimited: true, + rateLimitGenerator: rateLimitGenerator, + conns: make(map[string]struct{}), + } +} + +type network struct { + mu sync.Mutex + latencies map[peer.ID]map[peer.ID]time.Duration + rateLimiters map[peer.ID]map[peer.ID]*mocknet.RateLimiter + clients map[peer.ID]*receiverQueue + routingserver mockrouting.Server + delay delay.D + isRateLimited bool + rateLimitGenerator RateLimitGenerator + conns map[string]struct{} +} + +type message struct { + from peer.ID + msg bsmsg.BitSwapMessage + shouldSend time.Time +} + +// receiverQueue queues up a set of messages to be sent, and sends them *in +// order* with their delays respected as much as sending them in order allows +// for +type receiverQueue struct { + receiver *networkClient + queue []*message + active bool + lk sync.Mutex +} + +func (n *network) Adapter(p tnet.Identity, opts ...bsnet.NetOpt) bsnet.BitSwapNetwork { + n.mu.Lock() + defer n.mu.Unlock() + + s := bsnet.Settings{ + SupportedProtocols: []protocol.ID{ + bsnet.ProtocolBitswap, + bsnet.ProtocolBitswapOneOne, + bsnet.ProtocolBitswapOneZero, + bsnet.ProtocolBitswapNoVers, + }, + } + for _, opt := range opts { + opt(&s) + } + + client := &networkClient{ + local: p.ID(), + network: n, + routing: n.routingserver.Client(p), + supportedProtocols: s.SupportedProtocols, + } + n.clients[p.ID()] = &receiverQueue{receiver: client} + return client +} + +func (n *network) HasPeer(p peer.ID) bool { + n.mu.Lock() + defer n.mu.Unlock() + + _, found := n.clients[p] + return found +} + +// TODO should this be completely asynchronous? +// TODO what does the network layer do with errors received from services? +func (n *network) SendMessage( + ctx context.Context, + from peer.ID, + to peer.ID, + mes bsmsg.BitSwapMessage) error { + + mes = mes.Clone() + + n.mu.Lock() + defer n.mu.Unlock() + + latencies, ok := n.latencies[from] + if !ok { + latencies = make(map[peer.ID]time.Duration) + n.latencies[from] = latencies + } + + latency, ok := latencies[to] + if !ok { + latency = n.delay.NextWaitTime() + latencies[to] = latency + } + + var bandwidthDelay time.Duration + if n.isRateLimited { + rateLimiters, ok := n.rateLimiters[from] + if !ok { + rateLimiters = make(map[peer.ID]*mocknet.RateLimiter) + n.rateLimiters[from] = rateLimiters + } + + rateLimiter, ok := rateLimiters[to] + if !ok { + rateLimiter = mocknet.NewRateLimiter(n.rateLimitGenerator.NextRateLimit()) + rateLimiters[to] = rateLimiter + } + + size := mes.ToProtoV1().Size() + bandwidthDelay = rateLimiter.Limit(size) + } else { + bandwidthDelay = 0 + } + + receiver, ok := n.clients[to] + if !ok { + return errors.New("cannot locate peer on network") + } + + // nb: terminate the context since the context wouldn't actually be passed + // over the network in a real scenario + + msg := &message{ + from: from, + msg: mes, + shouldSend: time.Now().Add(latency).Add(bandwidthDelay), + } + receiver.enqueue(msg) + + return nil +} + +type networkClient struct { + // These need to be at the top of the struct (allocated on the heap) for alignment on 32bit platforms. 
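+	// (sync/atomic only guarantees 64-bit alignment for the first word of an
+	// allocated struct on 32-bit platforms, and stats is updated below via
+	// atomic.AddUint64/atomic.LoadUint64.)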
+ stats bsnet.Stats + + local peer.ID + bsnet.Receiver + network *network + routing routing.Routing + supportedProtocols []protocol.ID +} + +func (nc *networkClient) Self() peer.ID { + return nc.local +} + +func (nc *networkClient) Ping(ctx context.Context, p peer.ID) ping.Result { + return ping.Result{RTT: nc.Latency(p)} +} + +func (nc *networkClient) Latency(p peer.ID) time.Duration { + nc.network.mu.Lock() + defer nc.network.mu.Unlock() + return nc.network.latencies[nc.local][p] +} + +func (nc *networkClient) SendMessage( + ctx context.Context, + to peer.ID, + message bsmsg.BitSwapMessage) error { + if err := nc.network.SendMessage(ctx, nc.local, to, message); err != nil { + return err + } + atomic.AddUint64(&nc.stats.MessagesSent, 1) + return nil +} + +func (nc *networkClient) Stats() bsnet.Stats { + return bsnet.Stats{ + MessagesRecvd: atomic.LoadUint64(&nc.stats.MessagesRecvd), + MessagesSent: atomic.LoadUint64(&nc.stats.MessagesSent), + } +} + +// FindProvidersAsync returns a channel of providers for the given key. +func (nc *networkClient) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { + // NB: this function duplicates the AddrInfo -> ID transformation in the + // bitswap network adapter. Not to worry. This network client will be + // deprecated once the dms3net.Mock is added. The code below is only + // temporary. + + out := make(chan peer.ID) + go func() { + defer close(out) + providers := nc.routing.FindProvidersAsync(ctx, k, max) + for info := range providers { + select { + case <-ctx.Done(): + case out <- info.ID: + } + } + }() + return out +} + +func (nc *networkClient) ConnectionManager() connmgr.ConnManager { + return &connmgr.NullConnMgr{} +} + +type messagePasser struct { + net *networkClient + target peer.ID + local peer.ID + ctx context.Context +} + +func (mp *messagePasser) SendMsg(ctx context.Context, m bsmsg.BitSwapMessage) error { + return mp.net.SendMessage(ctx, mp.target, m) +} + +func (mp *messagePasser) Close() error { + return nil +} + +func (mp *messagePasser) Reset() error { + return nil +} + +var oldProtos = map[protocol.ID]struct{}{ + bsnet.ProtocolBitswapNoVers: struct{}{}, + bsnet.ProtocolBitswapOneZero: struct{}{}, + bsnet.ProtocolBitswapOneOne: struct{}{}, +} + +func (mp *messagePasser) SupportsHave() bool { + protos := mp.net.network.clients[mp.target].receiver.supportedProtocols + for _, proto := range protos { + if _, ok := oldProtos[proto]; !ok { + return true + } + } + return false +} + +func (nc *networkClient) NewMessageSender(ctx context.Context, p peer.ID, opts *bsnet.MessageSenderOpts) (bsnet.MessageSender, error) { + return &messagePasser{ + net: nc, + target: p, + local: nc.local, + ctx: ctx, + }, nil +} + +// Provide provides the key to the network. +func (nc *networkClient) Provide(ctx context.Context, k cid.Cid) error { + return nc.routing.Provide(ctx, k, true) +} + +func (nc *networkClient) SetDelegate(r bsnet.Receiver) { + nc.Receiver = r +} + +func (nc *networkClient) ConnectTo(_ context.Context, p peer.ID) error { + nc.network.mu.Lock() + otherClient, ok := nc.network.clients[p] + if !ok { + nc.network.mu.Unlock() + return errors.New("no such peer in network") + } + + tag := tagForPeers(nc.local, p) + if _, ok := nc.network.conns[tag]; ok { + nc.network.mu.Unlock() + // log.Warning("ALREADY CONNECTED TO PEER (is this a reconnect? 
test lib needs fixing)") + return nil + } + nc.network.conns[tag] = struct{}{} + nc.network.mu.Unlock() + + otherClient.receiver.PeerConnected(nc.local) + nc.Receiver.PeerConnected(p) + return nil +} + +func (nc *networkClient) DisconnectFrom(_ context.Context, p peer.ID) error { + nc.network.mu.Lock() + defer nc.network.mu.Unlock() + + otherClient, ok := nc.network.clients[p] + if !ok { + return errors.New("no such peer in network") + } + + tag := tagForPeers(nc.local, p) + if _, ok := nc.network.conns[tag]; !ok { + // Already disconnected + return nil + } + delete(nc.network.conns, tag) + + otherClient.receiver.PeerDisconnected(nc.local) + nc.Receiver.PeerDisconnected(p) + return nil +} + +func (rq *receiverQueue) enqueue(m *message) { + rq.lk.Lock() + defer rq.lk.Unlock() + rq.queue = append(rq.queue, m) + if !rq.active { + rq.active = true + go rq.process() + } +} + +func (rq *receiverQueue) Swap(i, j int) { + rq.queue[i], rq.queue[j] = rq.queue[j], rq.queue[i] +} + +func (rq *receiverQueue) Len() int { + return len(rq.queue) +} + +func (rq *receiverQueue) Less(i, j int) bool { + return rq.queue[i].shouldSend.UnixNano() < rq.queue[j].shouldSend.UnixNano() +} + +func (rq *receiverQueue) process() { + for { + rq.lk.Lock() + sort.Sort(rq) + if len(rq.queue) == 0 { + rq.active = false + rq.lk.Unlock() + return + } + m := rq.queue[0] + if time.Until(m.shouldSend).Seconds() < 0.1 { + rq.queue = rq.queue[1:] + rq.lk.Unlock() + time.Sleep(time.Until(m.shouldSend)) + atomic.AddUint64(&rq.receiver.stats.MessagesRecvd, 1) + rq.receiver.ReceiveMessage(context.TODO(), m.from, m.msg) + } else { + rq.lk.Unlock() + time.Sleep(100 * time.Millisecond) + } + } +} + +func tagForPeers(a, b peer.ID) string { + if a < b { + return string(a + b) + } + return string(b + a) +} diff --git a/wantlist/wantlist.go b/wantlist/wantlist.go new file mode 100644 index 0000000000000000000000000000000000000000..dd49972c003763d466f8d3eacd84c39f845936fd --- /dev/null +++ b/wantlist/wantlist.go @@ -0,0 +1,124 @@ +// Package wantlist implements an object for bitswap that contains the keys +// that a given peer wants. +package wantlist + +import ( + "sort" + + pb "gitlab.dms3.io/dms3/go-bitswap/message/pb" + + cid "gitlab.dms3.io/dms3/go-cid" +) + +// Wantlist is a raw list of wanted blocks and their priorities +type Wantlist struct { + set map[cid.Cid]Entry +} + +// Entry is an entry in a want list, consisting of a cid and its priority +type Entry struct { + Cid cid.Cid + Priority int32 + WantType pb.Message_Wantlist_WantType +} + +// NewRefEntry creates a new reference tracked wantlist entry. +func NewRefEntry(c cid.Cid, p int32) Entry { + return Entry{ + Cid: c, + Priority: p, + WantType: pb.Message_Wantlist_Block, + } +} + +type entrySlice []Entry + +func (es entrySlice) Len() int { return len(es) } +func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } +func (es entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priority } + +// New generates a new raw Wantlist +func New() *Wantlist { + return &Wantlist{ + set: make(map[cid.Cid]Entry), + } +} + +// Len returns the number of entries in a wantlist. +func (w *Wantlist) Len() int { + return len(w.set) +} + +// Add adds an entry in a wantlist from CID & Priority, if not already present. 
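+//
+// The intended upgrade/downgrade rules, mirroring the tests in this package
+// (c stands for any cid.Cid):
+//
+//	wl := New()
+//	wl.Add(c, 5, pb.Message_Wantlist_Have)  // true: new entry
+//	wl.Add(c, 5, pb.Message_Wantlist_Block) // true: want-block upgrades an existing want-have
+//	wl.Add(c, 9, pb.Message_Wantlist_Have)  // false: want-have never downgrades a want-block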
+func (w *Wantlist) Add(c cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType) bool { + e, ok := w.set[c] + + // Adding want-have should not override want-block + if ok && (e.WantType == pb.Message_Wantlist_Block || wantType == pb.Message_Wantlist_Have) { + return false + } + + w.set[c] = Entry{ + Cid: c, + Priority: priority, + WantType: wantType, + } + + return true +} + +// Remove removes the given cid from the wantlist. +func (w *Wantlist) Remove(c cid.Cid) bool { + _, ok := w.set[c] + if !ok { + return false + } + + delete(w.set, c) + return true +} + +// Remove removes the given cid from the wantlist, respecting the type: +// Remove with want-have will not remove an existing want-block. +func (w *Wantlist) RemoveType(c cid.Cid, wantType pb.Message_Wantlist_WantType) bool { + e, ok := w.set[c] + if !ok { + return false + } + + // Removing want-have should not remove want-block + if e.WantType == pb.Message_Wantlist_Block && wantType == pb.Message_Wantlist_Have { + return false + } + + delete(w.set, c) + return true +} + +// Contains returns the entry, if present, for the given CID, plus whether it +// was present. +func (w *Wantlist) Contains(c cid.Cid) (Entry, bool) { + e, ok := w.set[c] + return e, ok +} + +// Entries returns all wantlist entries for a want list. +func (w *Wantlist) Entries() []Entry { + es := make([]Entry, 0, len(w.set)) + for _, e := range w.set { + es = append(es, e) + } + return es +} + +// Absorb all the entries in other into this want list +func (w *Wantlist) Absorb(other *Wantlist) { + for _, e := range other.Entries() { + w.Add(e.Cid, e.Priority, e.WantType) + } +} + +// SortEntries sorts the list of entries by priority. +func SortEntries(es []Entry) { + sort.Sort(entrySlice(es)) +} diff --git a/wantlist/wantlist_test.go b/wantlist/wantlist_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8a3dcb4275daaa4a0aed21a1accb72cd85f594a0 --- /dev/null +++ b/wantlist/wantlist_test.go @@ -0,0 +1,221 @@ +package wantlist + +import ( + "testing" + + pb "gitlab.dms3.io/dms3/go-bitswap/message/pb" + cid "gitlab.dms3.io/dms3/go-cid" +) + +var testcids []cid.Cid + +func init() { + strs := []string{ + "QmQL8LqkEgYXaDHdNYCG2mmpow7Sp8Z8Kt3QS688vyBeC7", + "QmcBDsdjgSXU7BP4A4V8LJCXENE5xVwnhrhRGVTJr9YCVj", + "QmQakgd2wDxc3uUF4orGdEm28zUT9Mmimp5pyPG2SFS9Gj", + } + for _, s := range strs { + c, err := cid.Decode(s) + if err != nil { + panic(err) + } + testcids = append(testcids, c) + } + +} + +type wli interface { + Contains(cid.Cid) (Entry, bool) +} + +func assertHasCid(t *testing.T, w wli, c cid.Cid) { + e, ok := w.Contains(c) + if !ok { + t.Fatal("expected to have ", c) + } + if !e.Cid.Equals(c) { + t.Fatal("returned entry had wrong cid value") + } +} + +func TestBasicWantlist(t *testing.T) { + wl := New() + + if !wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) { + t.Fatal("expected true") + } + assertHasCid(t, wl, testcids[0]) + if !wl.Add(testcids[1], 4, pb.Message_Wantlist_Block) { + t.Fatal("expected true") + } + assertHasCid(t, wl, testcids[0]) + assertHasCid(t, wl, testcids[1]) + + if wl.Len() != 2 { + t.Fatal("should have had two items") + } + + if wl.Add(testcids[1], 4, pb.Message_Wantlist_Block) { + t.Fatal("add shouldnt report success on second add") + } + assertHasCid(t, wl, testcids[0]) + assertHasCid(t, wl, testcids[1]) + + if wl.Len() != 2 { + t.Fatal("should have had two items") + } + + if !wl.RemoveType(testcids[0], pb.Message_Wantlist_Block) { + t.Fatal("should have gotten true") + } + + assertHasCid(t, wl, testcids[1]) + 
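+	// RemoveType with want-block matches the stored want-block entry, so
+	// testcids[0] must now be absent while testcids[1] stays untouched.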
if _, has := wl.Contains(testcids[0]); has { + t.Fatal("shouldnt have this cid") + } +} + +func TestAddHaveThenBlock(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + + e, ok := wl.Contains(testcids[0]) + if !ok { + t.Fatal("expected to have ", testcids[0]) + } + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected to be ", pb.Message_Wantlist_Block) + } +} + +func TestAddBlockThenHave(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) + + e, ok := wl.Contains(testcids[0]) + if !ok { + t.Fatal("expected to have ", testcids[0]) + } + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected to be ", pb.Message_Wantlist_Block) + } +} + +func TestAddHaveThenRemoveBlock(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) + wl.RemoveType(testcids[0], pb.Message_Wantlist_Block) + + _, ok := wl.Contains(testcids[0]) + if ok { + t.Fatal("expected not to have ", testcids[0]) + } +} + +func TestAddBlockThenRemoveHave(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + wl.RemoveType(testcids[0], pb.Message_Wantlist_Have) + + e, ok := wl.Contains(testcids[0]) + if !ok { + t.Fatal("expected to have ", testcids[0]) + } + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected to be ", pb.Message_Wantlist_Block) + } +} + +func TestAddHaveThenRemoveAny(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Have) + wl.Remove(testcids[0]) + + _, ok := wl.Contains(testcids[0]) + if ok { + t.Fatal("expected not to have ", testcids[0]) + } +} + +func TestAddBlockThenRemoveAny(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + wl.Remove(testcids[0]) + + _, ok := wl.Contains(testcids[0]) + if ok { + t.Fatal("expected not to have ", testcids[0]) + } +} + +func TestAbsort(t *testing.T) { + wl := New() + wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) + wl.Add(testcids[1], 4, pb.Message_Wantlist_Have) + wl.Add(testcids[2], 3, pb.Message_Wantlist_Have) + + wl2 := New() + wl2.Add(testcids[0], 2, pb.Message_Wantlist_Have) + wl2.Add(testcids[1], 1, pb.Message_Wantlist_Block) + + wl.Absorb(wl2) + + e, ok := wl.Contains(testcids[0]) + if !ok { + t.Fatal("expected to have ", testcids[0]) + } + if e.Priority != 5 { + t.Fatal("expected priority 5") + } + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected type ", pb.Message_Wantlist_Block) + } + + e, ok = wl.Contains(testcids[1]) + if !ok { + t.Fatal("expected to have ", testcids[1]) + } + if e.Priority != 1 { + t.Fatal("expected priority 1") + } + if e.WantType != pb.Message_Wantlist_Block { + t.Fatal("expected type ", pb.Message_Wantlist_Block) + } + + e, ok = wl.Contains(testcids[2]) + if !ok { + t.Fatal("expected to have ", testcids[2]) + } + if e.Priority != 3 { + t.Fatal("expected priority 3") + } + if e.WantType != pb.Message_Wantlist_Have { + t.Fatal("expected type ", pb.Message_Wantlist_Have) + } +} + +func TestSortEntries(t *testing.T) { + wl := New() + + wl.Add(testcids[0], 3, pb.Message_Wantlist_Block) + wl.Add(testcids[1], 5, pb.Message_Wantlist_Have) + wl.Add(testcids[2], 4, pb.Message_Wantlist_Have) + + entries := wl.Entries() + SortEntries(entries) + + if !entries[0].Cid.Equals(testcids[1]) || + !entries[1].Cid.Equals(testcids[2]) || + !entries[2].Cid.Equals(testcids[0]) { + t.Fatal("wrong order") + } +} diff --git a/wiretap.go 
b/wiretap.go new file mode 100644 index 0000000000000000000000000000000000000000..f812df76a551a92a63d7b926bf0841d92659adb8 --- /dev/null +++ b/wiretap.go @@ -0,0 +1,27 @@ +package bitswap + +import ( + bsmsg "gitlab.dms3.io/dms3/go-bitswap/message" + peer "gitlab.dms3.io/p2p/go-p2p-core/peer" +) + +// WireTap provides methods to access all messages sent and received by Bitswap. +// This interface can be used to implement various statistics (this is original intent). +type WireTap interface { + MessageReceived(peer.ID, bsmsg.BitSwapMessage) + MessageSent(peer.ID, bsmsg.BitSwapMessage) +} + +// Configures Bitswap to use given wiretap. +func EnableWireTap(tap WireTap) Option { + return func(bs *Bitswap) { + bs.wiretap = tap + } +} + +// Configures Bitswap not to use any wiretap. +func DisableWireTap() Option { + return func(bs *Bitswap) { + bs.wiretap = nil + } +} diff --git a/workers.go b/workers.go new file mode 100644 index 0000000000000000000000000000000000000000..2d4c08acf81a36c457084b770b9abf336425d395 --- /dev/null +++ b/workers.go @@ -0,0 +1,224 @@ +package bitswap + +import ( + "context" + "fmt" + + process "github.com/jbenet/goprocess" + procctx "github.com/jbenet/goprocess/context" + engine "gitlab.dms3.io/dms3/go-bitswap/internal/decision" + pb "gitlab.dms3.io/dms3/go-bitswap/message/pb" + cid "gitlab.dms3.io/dms3/go-cid" + "go.uber.org/zap" +) + +// TaskWorkerCount is the total number of simultaneous threads sending +// outgoing messages +var TaskWorkerCount = 8 + +func (bs *Bitswap) startWorkers(ctx context.Context, px process.Process) { + + // Start up workers to handle requests from other nodes for the data on this node + for i := 0; i < TaskWorkerCount; i++ { + i := i + px.Go(func(px process.Process) { + bs.taskWorker(ctx, i) + }) + } + + if bs.provideEnabled { + // Start up a worker to manage sending out provides messages + px.Go(func(px process.Process) { + bs.provideCollector(ctx) + }) + + // Spawn up multiple workers to handle incoming blocks + // consider increasing number if providing blocks bottlenecks + // file transfers + px.Go(bs.provideWorker) + } +} + +func (bs *Bitswap) taskWorker(ctx context.Context, id int) { + defer log.Debug("bitswap task worker shutting down...") + log := log.With("ID", id) + for { + log.Debug("Bitswap.TaskWorker.Loop") + select { + case nextEnvelope := <-bs.engine.Outbox(): + select { + case envelope, ok := <-nextEnvelope: + if !ok { + continue + } + + // TODO: Only record message as sent if there was no error? + // Ideally, yes. But we'd need some way to trigger a retry and/or drop + // the peer. 
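+				// Note the ordering: the decision engine (and the wiretap,
+				// when one is configured) is told about the outgoing message
+				// before sendBlocks below has actually put it on the network.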
+ bs.engine.MessageSent(envelope.Peer, envelope.Message) + if bs.wiretap != nil { + bs.wiretap.MessageSent(envelope.Peer, envelope.Message) + } + bs.sendBlocks(ctx, envelope) + case <-ctx.Done(): + return + } + case <-ctx.Done(): + return + } + } +} + +func (bs *Bitswap) logOutgoingBlocks(env *engine.Envelope) { + if ce := sflog.Check(zap.DebugLevel, "sent message"); ce == nil { + return + } + + self := bs.network.Self() + + for _, blockPresence := range env.Message.BlockPresences() { + c := blockPresence.Cid + switch blockPresence.Type { + case pb.Message_Have: + log.Debugw("sent message", + "type", "HAVE", + "cid", c, + "local", self, + "to", env.Peer, + ) + case pb.Message_DontHave: + log.Debugw("sent message", + "type", "DONT_HAVE", + "cid", c, + "local", self, + "to", env.Peer, + ) + default: + panic(fmt.Sprintf("unrecognized BlockPresence type %v", blockPresence.Type)) + } + + } + for _, block := range env.Message.Blocks() { + log.Debugw("sent message", + "type", "BLOCK", + "cid", block.Cid(), + "local", self, + "to", env.Peer, + ) + } +} + +func (bs *Bitswap) sendBlocks(ctx context.Context, env *engine.Envelope) { + // Blocks need to be sent synchronously to maintain proper backpressure + // throughout the network stack + defer env.Sent() + + err := bs.network.SendMessage(ctx, env.Peer, env.Message) + if err != nil { + log.Debugw("failed to send blocks message", + "peer", env.Peer, + "error", err, + ) + return + } + + bs.logOutgoingBlocks(env) + + dataSent := 0 + blocks := env.Message.Blocks() + for _, b := range blocks { + dataSent += len(b.RawData()) + } + bs.counterLk.Lock() + bs.counters.blocksSent += uint64(len(blocks)) + bs.counters.dataSent += uint64(dataSent) + bs.counterLk.Unlock() + bs.sentHistogram.Observe(float64(env.Message.Size())) + log.Debugw("sent message", "peer", env.Peer) +} + +func (bs *Bitswap) provideWorker(px process.Process) { + // FIXME: OnClosingContext returns a _custom_ context type. + // Unfortunately, deriving a new cancelable context from this custom + // type fires off a goroutine. To work around this, we create a single + // cancelable context up-front and derive all sub-contexts from that. + // + // See: https://gitlab.dms3.io/dms3/go-dms3/issues/5810 + ctx := procctx.OnClosingContext(px) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + limit := make(chan struct{}, provideWorkerMax) + + limitedGoProvide := func(k cid.Cid, wid int) { + defer func() { + // replace token when done + <-limit + }() + + log.Debugw("Bitswap.ProvideWorker.Start", "ID", wid, "cid", k) + defer log.Debugw("Bitswap.ProvideWorker.End", "ID", wid, "cid", k) + + ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx + defer cancel() + + if err := bs.network.Provide(ctx, k); err != nil { + log.Warn(err) + } + } + + // worker spawner, reads from bs.provideKeys until it closes, spawning a + // _ratelimited_ number of workers to handle each key. 
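+	// The buffered `limit` channel above acts as a counting semaphore:
+	// sending into it blocks once provideWorkerMax provides are in flight,
+	// and each limitedGoProvide releases its slot via the deferred receive.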
+ for wid := 2; ; wid++ { + log.Debug("Bitswap.ProvideWorker.Loop") + + select { + case <-px.Closing(): + return + case k, ok := <-bs.provideKeys: + if !ok { + log.Debug("provideKeys channel closed") + return + } + select { + case <-px.Closing(): + return + case limit <- struct{}{}: + go limitedGoProvide(k, wid) + } + } + } +} + +func (bs *Bitswap) provideCollector(ctx context.Context) { + defer close(bs.provideKeys) + var toProvide []cid.Cid + var nextKey cid.Cid + var keysOut chan cid.Cid + + for { + select { + case blkey, ok := <-bs.newBlocks: + if !ok { + log.Debug("newBlocks channel closed") + return + } + + if keysOut == nil { + nextKey = blkey + keysOut = bs.provideKeys + } else { + toProvide = append(toProvide, blkey) + } + case keysOut <- nextKey: + if len(toProvide) > 0 { + nextKey = toProvide[0] + toProvide = toProvide[1:] + } else { + keysOut = nil + } + case <-ctx.Done(): + return + } + } +}
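+
+// provideCollector relies on a small but easy-to-miss Go idiom: a send case on
+// a nil channel is never selected, so setting keysOut to nil switches off the
+// output side of the select until another block key arrives. A distilled
+// sketch of the same pattern, independent of Bitswap (in and realOut are
+// hypothetical channels, shown only for illustration):
+//
+//	var pending []cid.Cid
+//	var next cid.Cid
+//	var out chan cid.Cid
+//	for {
+//		select {
+//		case k := <-in:
+//			if out == nil {
+//				next, out = k, realOut // something to send: enable the send case
+//			} else {
+//				pending = append(pending, k) // buffer without blocking the producer
+//			}
+//		case out <- next:
+//			if len(pending) > 0 {
+//				next, pending = pending[0], pending[1:]
+//			} else {
+//				out = nil // queue drained: disable the send case again
+//			}
+//		}
+//	}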