diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml
new file mode 100644
index 0000000000000000000000000000000000000000..840618d88f2fdc33972facbed16a84929643bf6f
--- /dev/null
+++ b/.github/workflows/automerge.yml
@@ -0,0 +1,27 @@
+# File managed by web3-bot. DO NOT EDIT.
+# See https://github.com/protocol/.github/ for details.
+
+# Automatically merge pull requests opened by web3-bot, as soon as (and only if) all tests pass.
+# This reduces the friction associated with updating our workflows.
+
+on: [ pull_request ]
+
+jobs:
+  automerge:
+    if: github.event.pull_request.user.login == 'web3-bot'
+    runs-on: ubuntu-latest
+    steps:
+      - name: Wait on tests
+        uses: lewagon/wait-on-check-action@bafe56a6863672c681c3cf671f5e10b20abf2eaa # v0.2
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          wait-interval: 10
+          running-workflow-name: 'automerge' # the name of this job
+      - name: Merge PR
+        uses: pascalgn/automerge-action@741c311a47881be9625932b0a0de1b0937aab1ae # v0.13.1
+        env:
+          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
+          MERGE_LABELS: ""
+          MERGE_METHOD: "squash"
+          MERGE_DELETE_BRANCH: true
diff --git a/.github/workflows/go-check.yml b/.github/workflows/go-check.yml
new file mode 100644
index 0000000000000000000000000000000000000000..862d49fc9f3443177b1d3e78e158f24340a3e980
--- /dev/null
+++ b/.github/workflows/go-check.yml
@@ -0,0 +1,41 @@
+# File managed by web3-bot. DO NOT EDIT.
+# See https://github.com/protocol/.github/ for details.
+
+on: [push, pull_request]
+
+jobs:
+  unit:
+    runs-on: ubuntu-latest
+    name: Go checks
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-go@v2
+        with:
+          go-version: "1.16.x"
+      - name: Install staticcheck
+        run: go install honnef.co/go/tools/cmd/staticcheck@be534f007836a777104a15f2456cd1fffd3ddee8 # v2020.2.2
+      - name: Check that go.mod is tidy
+        run: |
+          go mod tidy
+          if [[ -n $(git ls-files --other --exclude-standard --directory -- go.sum) ]]; then
+            echo "go.sum was added by go mod tidy"
+            exit 1
+          fi
+          git diff --exit-code -- go.sum go.mod
+      - name: gofmt
+        if: ${{ success() || failure() }} # run this step even if the previous one failed
+        run: |
+          out=$(gofmt -s -l .)
+          if [[ -n "$out" ]]; then
+            echo "$out" | awk '{print "::error file=" $0 ",line=0,col=0::File is not gofmt-ed."}'
+            exit 1
+          fi
+      - name: go vet
+        if: ${{ success() || failure() }} # run this step even if the previous one failed
+        run: go vet ./...
+      - name: staticcheck
+        if: ${{ success() || failure() }} # run this step even if the previous one failed
+        run: |
+          set -o pipefail
+          staticcheck ./... | sed -e 's@\(.*\)\.go@./\1.go@g'
+
diff --git a/.github/workflows/go-test.yml b/.github/workflows/go-test.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9b384208624b2615489e028c4cec15efd1557ee3
--- /dev/null
+++ b/.github/workflows/go-test.yml
@@ -0,0 +1,38 @@
+# File managed by web3-bot. DO NOT EDIT.
+# See https://github.com/protocol/.github/ for details.
+
+on: [push, pull_request]
+
+jobs:
+  unit:
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ "ubuntu", "windows", "macos" ]
+        go: [ "1.15.x", "1.16.x" ]
+    runs-on: ${{ matrix.os }}-latest
+    name: Unit tests (${{ matrix.os }}, Go ${{ matrix.go }})
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-go@v2
+        with:
+          go-version: ${{ matrix.go }}
+      - name: Go information
+        run: |
+          go version
+          go env
+      - name: Run tests
+        run: go test -v -coverprofile coverage.txt ./...
+      - name: Run tests (32 bit)
+        if: ${{ matrix.os != 'macos' }} # can't run 32 bit tests on OSX.
+        env:
+          GOARCH: 386
+        run: go test -v ./...
+      - name: Run tests with race detector
+        if: ${{ matrix.os == 'ubuntu' }} # speed things up. Windows and OSX VMs are slow
+        run: go test -v -race ./...
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@967e2b38a85a62bd61be5529ada27ebc109948c2 # v1.4.1
+        with:
+          file: coverage.txt
+          env_vars: OS=${{ matrix.os }}, GO=${{ matrix.go }}
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..26100332ba2ca84bd6e87bdf034ab6bb18dceb99
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Jeromy Johnson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/README.md b/README.md
index 44ce4e3c06ca5a04158188489ad0fdfcd806c4de..528725d490551d1f2b58cef3476039bc45bc6fca 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,156 @@
-# go-p2p-swarm
+go-libp2p-swarm
+==================
 
-dms3 p2p swarm
\ No newline at end of file
+[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai)
+[![](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](https://libp2p.io/)
+[![](https://img.shields.io/badge/freenode-%23libp2p-yellow.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23libp2p)
+[![Coverage Status](https://coveralls.io/repos/github/libp2p/go-libp2p-swarm/badge.svg?branch=master)](https://coveralls.io/github/libp2p/go-libp2p-swarm?branch=master)
+[![Travis CI](https://travis-ci.org/libp2p/go-libp2p-swarm.svg?branch=master)](https://travis-ci.org/libp2p/go-libp2p-swarm)
+[![Discourse posts](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io)
+
+> The libp2p swarm manages groups of connections to peers, and handles incoming and outgoing streams.
+
+The libp2p swarm is the 'low-level' interface for working with a given libp2p
+network. It gives you more fine-grained control over various aspects of the
+system. Most applications don't need this level of access, so the `Swarm` is
+generally wrapped in a `Host` abstraction that provides a more friendly
+interface. See [the host interface](https://godoc.org/github.com/libp2p/go-libp2p-core/host#Host)
+for more info on that.
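+
+For comparison, here is a minimal sketch of that `Host`-level path (an
+editorial illustration, assuming the top-level `github.com/go-libp2p`...
+rather, `github.com/libp2p/go-libp2p` package of the same era; it is not part
+of this module):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	libp2p "github.com/libp2p/go-libp2p"
+)
+
+func main() {
+	// libp2p.New generates an identity, builds a swarm with default
+	// transports, and wraps it in a Host, so none of the manual setup
+	// described below is needed for the common case.
+	h, err := libp2p.New(context.Background())
+	if err != nil {
+		panic(err)
+	}
+	defer h.Close()
+
+	fmt.Println("I am", h.ID(), "listening on", h.Addrs())
+}
+```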
+
+## Table of Contents
+
+- [Install](#install)
+- [Usage](#usage)
+- [Contribute](#contribute)
+- [License](#license)
+
+## Install
+
+```sh
+make install
+```
+
+## Usage
+
+### Creating a swarm
+To construct a swarm, you'll call `NewSwarm`. That function looks like this:
+```go
+swarm, err := NewSwarm(ctx, laddrs, pid, pstore, bwc)
+```
+
+It takes five arguments to fully construct a swarm. The first is a Go
+`context.Context`. This controls the lifetime of the swarm, and all swarm
+processes have their lifespan derived from the given context. You can just use
+`context.Background()` if you're not concerned with that.
+
+The next argument is a slice of multiaddrs that the swarm will open
+listeners for. Once started, the swarm will accept and handle incoming
+connections on every given address. This argument is optional: you can pass
+`nil`, and the swarm will not listen for any incoming connections (but will
+still be able to dial out to other peers).
+
+After that, you'll need to give the swarm an identity in the form of a peer.ID.
+If you don't want to enable secio (libp2p's transport-layer encryption), you
+can pick any string for this value. For example `peer.ID("FooBar123")` would
+work. Note that passing a random string ID will result in your node not being
+able to communicate with other peers that have correctly generated IDs. To see
+how to generate a proper ID, see the section on "Identity Generation" below.
+
+The fourth argument is a peerstore. This is essentially a database that the
+swarm will use to store peer IDs, addresses, public keys, protocol preferences,
+and more. You can construct one by importing
+`github.com/libp2p/go-libp2p-peerstore` and calling `peerstore.NewPeerstore()`.
+
+The final argument is a bandwidth metrics collector. This is used to track
+incoming and outgoing bandwidth on connections managed by this swarm. It is
+optional, and passing `nil` will simply result in no metrics for connections
+being available.
+
+#### Identity Generation
+A proper libp2p identity is PKI based. We currently have support for RSA and ed25519 keys. To create a 'correct' ID, you'll need to either load or generate a new keypair. Here is an example of doing so:
+
+```go
+import (
+	"context"
+	"crypto/rand"
+	"fmt"
+
+	ci "github.com/libp2p/go-libp2p-crypto"
+	pstore "github.com/libp2p/go-libp2p-peerstore"
+	peer "github.com/libp2p/go-libp2p-peer"
+)
+
+func demo() {
+	// First, select a source of entropy. We're using the stdlib's crypto reader here
+	src := rand.Reader
+
+	// Now create a 2048 bit RSA key using that
+	priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 2048, src)
+	if err != nil {
+		panic(err) // oh no!
+	}
+
+	// Now that we have a keypair, let's create our identity from it
+	pid, err := peer.IDFromPrivateKey(priv)
+	if err != nil {
+		panic(err)
+	}
+
+	// Woo! Identity acquired!
+	fmt.Println("I am ", pid)
+
+	// Now, for the purposes of building a swarm, let's add this all to a peerstore.
+	ps := pstore.NewPeerstore()
+	ps.AddPubKey(pid, pub)
+	ps.AddPrivKey(pid, priv)
+
+	// Once you've got all that, creating a basic swarm can be as easy as
+	ctx := context.Background()
+	swarm, err := NewSwarm(ctx, nil, pid, ps, nil)
+	if err != nil {
+		panic(err)
+	}
+
+	// voila! A functioning swarm!
+	_ = swarm
+}
+```
+
+### Streams
+The swarm is designed around using multiplexed streams to communicate with
+other peers.
When working with a swarm, you will want to set a function to +handle incoming streams from your peers: + +```go +swrm.SetStreamHandler(func(s inet.Stream) { + defer s.Close() + fmt.Println("Got a stream from: ", s.SwarmConn().RemotePeer()) + fmt.Fprintln(s, "Hello Friend!") +}) +``` + +Tip: Always make sure to close streams when you're done with them. + +Opening streams is also pretty simple: +```go +s, err := swrm.NewStreamWithPeer(ctx, rpid) +if err != nil { + panic(err) +} +defer s.Close() + +io.Copy(os.Stdout, s) // pipe the stream to stdout +``` + +Just pass a context and the ID of the peer you want a stream to, and you'll get +back a stream to read and write on. + + +## Contribute + +PRs are welcome! + +Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +MIT © Jeromy Johnson + +--- + +The last gx published version of this module was: 3.0.35: QmQVoMEL1CxrVusTSUdYsiJXVBnvSqNUpBsGybkwSfksEF diff --git a/addrs.go b/addrs.go new file mode 100644 index 0000000000000000000000000000000000000000..b9068720c55d563530a6ddfbfde8e4f8c0d8a4ab --- /dev/null +++ b/addrs.go @@ -0,0 +1,36 @@ +package swarm + +import ( + filter "github.com/libp2p/go-maddr-filter" + ma "github.com/multiformats/go-multiaddr" + mamask "github.com/whyrusleeping/multiaddr-filter" +) + +// http://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml +var lowTimeoutFilters = ma.NewFilters() + +func init() { + for _, p := range []string{ + "/ip4/10.0.0.0/ipcidr/8", + "/ip4/100.64.0.0/ipcidr/10", + "/ip4/169.254.0.0/ipcidr/16", + "/ip4/172.16.0.0/ipcidr/12", + "/ip4/192.0.0.0/ipcidr/24", + "/ip4/192.0.0.0/ipcidr/29", + "/ip4/192.0.0.8/ipcidr/32", + "/ip4/192.0.0.170/ipcidr/32", + "/ip4/192.0.0.171/ipcidr/32", + "/ip4/192.0.2.0/ipcidr/24", + "/ip4/192.168.0.0/ipcidr/16", + "/ip4/198.18.0.0/ipcidr/15", + "/ip4/198.51.100.0/ipcidr/24", + "/ip4/203.0.113.0/ipcidr/24", + "/ip4/240.0.0.0/ipcidr/4", + } { + f, err := mamask.NewMask(p) + if err != nil { + panic("error in lowTimeoutFilters init: " + err.Error()) + } + lowTimeoutFilters.AddFilter(*f, filter.ActionDeny) + } +} diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 0000000000000000000000000000000000000000..5f88a9ea2785f8dfafe65d5c5fa9663de93ff423 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,3 @@ +coverage: + range: "50...100" +comment: off diff --git a/dial_error.go b/dial_error.go new file mode 100644 index 0000000000000000000000000000000000000000..f2986348bf9f67069db37f518aed43dd88f2057e --- /dev/null +++ b/dial_error.go @@ -0,0 +1,71 @@ +package swarm + +import ( + "fmt" + "os" + "strings" + + "github.com/libp2p/go-libp2p-core/peer" + + ma "github.com/multiformats/go-multiaddr" +) + +// maxDialDialErrors is the maximum number of dial errors we record +const maxDialDialErrors = 16 + +// DialError is the error type returned when dialing. 
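+// It records the per-address failures as TransportErrors (up to
+// maxDialDialErrors of them, counting the rest in Skipped) and implements
+// Unwrap, returning the top-level Cause, so errors.Is and errors.As can
+// inspect it.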
+type DialError struct {
+	Peer       peer.ID
+	DialErrors []TransportError
+	Cause      error
+	Skipped    int
+}
+
+func (e *DialError) Timeout() bool {
+	return os.IsTimeout(e.Cause)
+}
+
+func (e *DialError) recordErr(addr ma.Multiaddr, err error) {
+	if len(e.DialErrors) >= maxDialDialErrors {
+		e.Skipped++
+		return
+	}
+	e.DialErrors = append(e.DialErrors, TransportError{
+		Address: addr,
+		Cause:   err,
+	})
+}
+
+func (e *DialError) Error() string {
+	var builder strings.Builder
+	fmt.Fprintf(&builder, "failed to dial %s:", e.Peer)
+	if e.Cause != nil {
+		fmt.Fprintf(&builder, " %s", e.Cause)
+	}
+	for _, te := range e.DialErrors {
+		fmt.Fprintf(&builder, "\n  * [%s] %s", te.Address, te.Cause)
+	}
+	if e.Skipped > 0 {
+		fmt.Fprintf(&builder, "\n    ... skipping %d errors ...", e.Skipped)
+	}
+	return builder.String()
+}
+
+// Unwrap implements https://godoc.org/golang.org/x/xerrors#Wrapper.
+func (e *DialError) Unwrap() error {
+	return e.Cause
+}
+
+var _ error = (*DialError)(nil)
+
+// TransportError is the error returned when dialing a specific address.
+type TransportError struct {
+	Address ma.Multiaddr
+	Cause   error
+}
+
+func (e *TransportError) Error() string {
+	return fmt.Sprintf("failed to dial %s: %s", e.Address, e.Cause)
+}
+
+var _ error = (*TransportError)(nil)
diff --git a/dial_sync.go b/dial_sync.go
new file mode 100644
index 0000000000000000000000000000000000000000..e334ef5a258f9f40f3f6fc7ff01ead34514c966e
--- /dev/null
+++ b/dial_sync.go
@@ -0,0 +1,122 @@
+package swarm
+
+import (
+	"context"
+	"sync"
+
+	"github.com/libp2p/go-libp2p-core/network"
+	"github.com/libp2p/go-libp2p-core/peer"
+)
+
+// dialWorkerFunc is used by DialSync to spawn a new dial worker
+type dialWorkerFunc func(context.Context, peer.ID, <-chan dialRequest) error
+
+// newDialSync constructs a new DialSync
+func newDialSync(worker dialWorkerFunc) *DialSync {
+	return &DialSync{
+		dials:      make(map[peer.ID]*activeDial),
+		dialWorker: worker,
+	}
+}
+
+// DialSync is a dial synchronization helper that ensures that at most one dial
+// to any given peer is active at any given time.
+type DialSync struct {
+	dials      map[peer.ID]*activeDial
+	dialsLk    sync.Mutex
+	dialWorker dialWorkerFunc
+}
+
+type activeDial struct {
+	id     peer.ID
+	refCnt int
+
+	ctx    context.Context
+	cancel func()
+
+	reqch chan dialRequest
+
+	ds *DialSync
+}
+
+func (ad *activeDial) decref() {
+	ad.ds.dialsLk.Lock()
+	ad.refCnt--
+	if ad.refCnt == 0 {
+		ad.cancel()
+		close(ad.reqch)
+		delete(ad.ds.dials, ad.id)
+	}
+	ad.ds.dialsLk.Unlock()
+}
+
+func (ad *activeDial) dial(ctx context.Context, p peer.ID) (*Conn, error) {
+	dialCtx := ad.ctx
+
+	if forceDirect, reason := network.GetForceDirectDial(ctx); forceDirect {
+		dialCtx = network.WithForceDirectDial(dialCtx, reason)
+	}
+	if simConnect, reason := network.GetSimultaneousConnect(ctx); simConnect {
+		dialCtx = network.WithSimultaneousConnect(dialCtx, reason)
+	}
+
+	resch := make(chan dialResponse, 1)
+	select {
+	case ad.reqch <- dialRequest{ctx: dialCtx, resch: resch}:
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
+
+	select {
+	case res := <-resch:
+		return res.conn, res.err
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
+}
+
+func (ds *DialSync) getActiveDial(p peer.ID) (*activeDial, error) {
+	ds.dialsLk.Lock()
+	defer ds.dialsLk.Unlock()
+
+	actd, ok := ds.dials[p]
+	if !ok {
+		// This code intentionally uses the background context. Otherwise, if the first call
+		// to Dial is canceled, subsequent dial calls will also be canceled.
+		// XXX: this also breaks direct connection logic.
We will need to pipe the + // information through some other way. + adctx, cancel := context.WithCancel(context.Background()) + actd = &activeDial{ + id: p, + ctx: adctx, + cancel: cancel, + reqch: make(chan dialRequest), + ds: ds, + } + + err := ds.dialWorker(adctx, p, actd.reqch) + if err != nil { + cancel() + return nil, err + } + + ds.dials[p] = actd + } + + // increase ref count before dropping dialsLk + actd.refCnt++ + + return actd, nil +} + +// DialLock initiates a dial to the given peer if there are none in progress +// then waits for the dial to that peer to complete. +func (ds *DialSync) DialLock(ctx context.Context, p peer.ID) (*Conn, error) { + ad, err := ds.getActiveDial(p) + if err != nil { + return nil, err + } + + defer ad.decref() + return ad.dial(ctx, p) +} diff --git a/dial_sync_test.go b/dial_sync_test.go new file mode 100644 index 0000000000000000000000000000000000000000..59ace9ae672131367c253a37c87f73af1ec2e7cd --- /dev/null +++ b/dial_sync_test.go @@ -0,0 +1,293 @@ +package swarm + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/libp2p/go-libp2p-core/peer" +) + +func getMockDialFunc() (dialWorkerFunc, func(), context.Context, <-chan struct{}) { + dfcalls := make(chan struct{}, 512) // buffer it large enough that we won't care + dialctx, cancel := context.WithCancel(context.Background()) + ch := make(chan struct{}) + f := func(ctx context.Context, p peer.ID, reqch <-chan dialRequest) error { + dfcalls <- struct{}{} + go func() { + defer cancel() + for { + select { + case req, ok := <-reqch: + if !ok { + return + } + + select { + case <-ch: + req.resch <- dialResponse{conn: new(Conn)} + case <-ctx.Done(): + req.resch <- dialResponse{err: ctx.Err()} + return + } + case <-ctx.Done(): + return + } + } + }() + return nil + } + + o := new(sync.Once) + + return f, func() { o.Do(func() { close(ch) }) }, dialctx, dfcalls +} + +func TestBasicDialSync(t *testing.T) { + df, done, _, callsch := getMockDialFunc() + + dsync := newDialSync(df) + + p := peer.ID("testpeer") + + ctx := context.Background() + + finished := make(chan struct{}) + go func() { + _, err := dsync.DialLock(ctx, p) + if err != nil { + t.Error(err) + } + finished <- struct{}{} + }() + + go func() { + _, err := dsync.DialLock(ctx, p) + if err != nil { + t.Error(err) + } + finished <- struct{}{} + }() + + // short sleep just to make sure we've moved around in the scheduler + time.Sleep(time.Millisecond * 20) + done() + + <-finished + <-finished + + if len(callsch) > 1 { + t.Fatal("should only have called dial func once!") + } +} + +func TestDialSyncCancel(t *testing.T) { + df, done, _, dcall := getMockDialFunc() + + dsync := newDialSync(df) + + p := peer.ID("testpeer") + + ctx1, cancel1 := context.WithCancel(context.Background()) + + finished := make(chan struct{}) + go func() { + _, err := dsync.DialLock(ctx1, p) + if err != ctx1.Err() { + t.Error("should have gotten context error") + } + finished <- struct{}{} + }() + + // make sure the above makes it through the wait code first + select { + case <-dcall: + case <-time.After(time.Second): + t.Fatal("timed out waiting for dial to start") + } + + // Add a second dialwait in so two actors are waiting on the same dial + go func() { + _, err := dsync.DialLock(context.Background(), p) + if err != nil { + t.Error(err) + } + finished <- struct{}{} + }() + + time.Sleep(time.Millisecond * 20) + + // cancel the first dialwait, it should not affect the second at all + cancel1() + select { + case <-finished: + case <-time.After(time.Second): + 
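+			// the canceled waiter must return promptly even though the
+			// shared dial worker is still running for the second waiter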
t.Fatal("timed out waiting for wait to exit") + } + + // short sleep just to make sure we've moved around in the scheduler + time.Sleep(time.Millisecond * 20) + done() + + <-finished +} + +func TestDialSyncAllCancel(t *testing.T) { + df, done, dctx, _ := getMockDialFunc() + + dsync := newDialSync(df) + + p := peer.ID("testpeer") + + ctx1, cancel1 := context.WithCancel(context.Background()) + + finished := make(chan struct{}) + go func() { + _, err := dsync.DialLock(ctx1, p) + if err != ctx1.Err() { + t.Error("should have gotten context error") + } + finished <- struct{}{} + }() + + // Add a second dialwait in so two actors are waiting on the same dial + go func() { + _, err := dsync.DialLock(ctx1, p) + if err != ctx1.Err() { + t.Error("should have gotten context error") + } + finished <- struct{}{} + }() + + cancel1() + for i := 0; i < 2; i++ { + select { + case <-finished: + case <-time.After(time.Second): + t.Fatal("timed out waiting for wait to exit") + } + } + + // the dial should have exited now + select { + case <-dctx.Done(): + case <-time.After(time.Second): + t.Fatal("timed out waiting for dial to return") + } + + // should be able to successfully dial that peer again + done() + _, err := dsync.DialLock(context.Background(), p) + if err != nil { + t.Fatal(err) + } +} + +func TestFailFirst(t *testing.T) { + var count int + f := func(ctx context.Context, p peer.ID, reqch <-chan dialRequest) error { + go func() { + for { + select { + case req, ok := <-reqch: + if !ok { + return + } + + if count > 0 { + req.resch <- dialResponse{conn: new(Conn)} + } else { + req.resch <- dialResponse{err: fmt.Errorf("gophers ate the modem")} + } + count++ + + case <-ctx.Done(): + return + } + } + }() + return nil + } + + ds := newDialSync(f) + + p := peer.ID("testing") + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + _, err := ds.DialLock(ctx, p) + if err == nil { + t.Fatal("expected gophers to have eaten the modem") + } + + c, err := ds.DialLock(ctx, p) + if err != nil { + t.Fatal(err) + } + + if c == nil { + t.Fatal("should have gotten a 'real' conn back") + } +} + +func TestStressActiveDial(t *testing.T) { + ds := newDialSync(func(ctx context.Context, p peer.ID, reqch <-chan dialRequest) error { + go func() { + for { + select { + case req, ok := <-reqch: + if !ok { + return + } + + req.resch <- dialResponse{} + case <-ctx.Done(): + return + } + } + }() + return nil + }) + + wg := sync.WaitGroup{} + + pid := peer.ID("foo") + + makeDials := func() { + for i := 0; i < 10000; i++ { + ds.DialLock(context.Background(), pid) + } + wg.Done() + } + + for i := 0; i < 100; i++ { + wg.Add(1) + go makeDials() + } + + wg.Wait() +} + +func TestDialSelf(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + self := peer.ID("ABC") + s := NewSwarm(ctx, self, nil, nil) + defer s.Close() + + // this should fail + _, err := s.dsync.DialLock(ctx, self) + if err != ErrDialToSelf { + t.Fatal("expected error from self dial") + } + + // do it twice to make sure we get a new active dial object that fails again + _, err = s.dsync.DialLock(ctx, self) + if err != ErrDialToSelf { + t.Fatal("expected error from self dial") + } +} diff --git a/dial_test.go b/dial_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6258d0eda124c3466a6e7c64c06f2526b9bfaef9 --- /dev/null +++ b/dial_test.go @@ -0,0 +1,687 @@ +package swarm_test + +import ( + "context" + "net" + "sync" + "testing" + "time" + + addrutil 
"github.com/libp2p/go-addr-util" + + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/peerstore" + "github.com/libp2p/go-libp2p-core/transport" + + testutil "github.com/libp2p/go-libp2p-core/test" + swarmt "github.com/libp2p/go-libp2p-swarm/testing" + "github.com/libp2p/go-libp2p-testing/ci" + + ma "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + + . "github.com/libp2p/go-libp2p-swarm" +) + +func init() { + transport.DialTimeout = time.Second +} + +func closeSwarms(swarms []*Swarm) { + for _, s := range swarms { + s.Close() + } +} + +func TestBasicDialPeer(t *testing.T) { + t.Parallel() + ctx := context.Background() + + swarms := makeSwarms(ctx, t, 2) + defer closeSwarms(swarms) + s1 := swarms[0] + s2 := swarms[1] + + s1.Peerstore().AddAddrs(s2.LocalPeer(), s2.ListenAddresses(), peerstore.PermanentAddrTTL) + + c, err := s1.DialPeer(ctx, s2.LocalPeer()) + if err != nil { + t.Fatal(err) + } + + s, err := c.NewStream(ctx) + if err != nil { + t.Fatal(err) + } + + s.Close() +} + +func TestDialWithNoListeners(t *testing.T) { + t.Parallel() + ctx := context.Background() + + s1 := makeDialOnlySwarm(ctx, t) + + swarms := makeSwarms(ctx, t, 1) + defer closeSwarms(swarms) + s2 := swarms[0] + + s1.Peerstore().AddAddrs(s2.LocalPeer(), s2.ListenAddresses(), peerstore.PermanentAddrTTL) + + c, err := s1.DialPeer(ctx, s2.LocalPeer()) + if err != nil { + t.Fatal(err) + } + + s, err := c.NewStream(ctx) + if err != nil { + t.Fatal(err) + } + + s.Close() +} + +func acceptAndHang(l net.Listener) { + conns := make([]net.Conn, 0, 10) + for { + c, err := l.Accept() + if err != nil { + break + } + if c != nil { + conns = append(conns, c) + } + } + for _, c := range conns { + c.Close() + } +} + +func TestSimultDials(t *testing.T) { + // t.Skip("skipping for another test") + t.Parallel() + + ctx := context.Background() + swarms := makeSwarms(ctx, t, 2, swarmt.OptDisableReuseport) + + // connect everyone + { + var wg sync.WaitGroup + connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) { + // copy for other peer + log.Debugf("TestSimultOpen: connecting: %s --> %s (%s)", s.LocalPeer(), dst, addr) + s.Peerstore().AddAddr(dst, addr, peerstore.TempAddrTTL) + if _, err := s.DialPeer(ctx, dst); err != nil { + t.Fatal("error swarm dialing to peer", err) + } + wg.Done() + } + + ifaceAddrs0, err := swarms[0].InterfaceListenAddresses() + if err != nil { + t.Fatal(err) + } + ifaceAddrs1, err := swarms[1].InterfaceListenAddresses() + if err != nil { + t.Fatal(err) + } + + log.Info("Connecting swarms simultaneously.") + for i := 0; i < 10; i++ { // connect 10x for each. 
+ wg.Add(2) + go connect(swarms[0], swarms[1].LocalPeer(), ifaceAddrs1[0]) + go connect(swarms[1], swarms[0].LocalPeer(), ifaceAddrs0[0]) + } + wg.Wait() + } + + // should still just have 1, at most 2 connections :) + c01l := len(swarms[0].ConnsToPeer(swarms[1].LocalPeer())) + if c01l > 2 { + t.Error("0->1 has", c01l) + } + c10l := len(swarms[1].ConnsToPeer(swarms[0].LocalPeer())) + if c10l > 2 { + t.Error("1->0 has", c10l) + } + + for _, s := range swarms { + s.Close() + } +} + +func newSilentPeer(t *testing.T) (peer.ID, ma.Multiaddr, net.Listener) { + dst := testutil.RandPeerIDFatal(t) + lst, err := net.Listen("tcp4", "localhost:0") + if err != nil { + t.Fatal(err) + } + addr, err := manet.FromNetAddr(lst.Addr()) + if err != nil { + t.Fatal(err) + } + addrs := []ma.Multiaddr{addr} + addrs, err = addrutil.ResolveUnspecifiedAddresses(addrs, nil) + if err != nil { + t.Fatal(err) + } + t.Log("new silent peer:", dst, addrs[0]) + return dst, addrs[0], lst +} + +func TestDialWait(t *testing.T) { + t.Parallel() + + ctx := context.Background() + swarms := makeSwarms(ctx, t, 1) + s1 := swarms[0] + defer s1.Close() + + // dial to a non-existent peer. + s2p, s2addr, s2l := newSilentPeer(t) + go acceptAndHang(s2l) + defer s2l.Close() + s1.Peerstore().AddAddr(s2p, s2addr, peerstore.PermanentAddrTTL) + + before := time.Now() + if c, err := s1.DialPeer(ctx, s2p); err == nil { + defer c.Close() + t.Fatal("error swarm dialing to unknown peer worked...", err) + } else { + t.Log("correctly got error:", err) + } + duration := time.Since(before) + + if duration < transport.DialTimeout*DialAttempts { + t.Error("< transport.DialTimeout * DialAttempts not being respected", duration, transport.DialTimeout*DialAttempts) + } + if duration > 2*transport.DialTimeout*DialAttempts { + t.Error("> 2*transport.DialTimeout * DialAttempts not being respected", duration, 2*transport.DialTimeout*DialAttempts) + } + + if !s1.Backoff().Backoff(s2p, s2addr) { + t.Error("s2 should now be on backoff") + } +} + +func TestDialBackoff(t *testing.T) { + // t.Skip("skipping for another test") + if ci.IsRunning() { + t.Skip("travis will never have fun with this test") + } + + t.Parallel() + + ctx := context.Background() + swarms := makeSwarms(ctx, t, 2) + s1 := swarms[0] + s2 := swarms[1] + defer s1.Close() + defer s2.Close() + + s2addrs, err := s2.InterfaceListenAddresses() + if err != nil { + t.Fatal(err) + } + s1.Peerstore().AddAddrs(s2.LocalPeer(), s2addrs, peerstore.PermanentAddrTTL) + + // dial to a non-existent peer. + s3p, s3addr, s3l := newSilentPeer(t) + go acceptAndHang(s3l) + defer s3l.Close() + s1.Peerstore().AddAddr(s3p, s3addr, peerstore.PermanentAddrTTL) + + // in this test we will: + // 1) dial 10x to each node. + // 2) all dials should hang + // 3) s1->s2 should succeed. 
+ // 4) s1->s3 should not (and should place s3 on backoff) + // 5) disconnect entirely + // 6) dial 10x to each node again + // 7) s3 dials should all return immediately (except 1) + // 8) s2 dials should all hang, and succeed + // 9) last s3 dial ends, unsuccessful + + dialOnlineNode := func(dst peer.ID, times int) <-chan bool { + ch := make(chan bool) + for i := 0; i < times; i++ { + go func() { + if _, err := s1.DialPeer(ctx, dst); err != nil { + t.Error("error dialing", dst, err) + ch <- false + } else { + ch <- true + } + }() + } + return ch + } + + dialOfflineNode := func(dst peer.ID, times int) <-chan bool { + ch := make(chan bool) + for i := 0; i < times; i++ { + go func() { + if c, err := s1.DialPeer(ctx, dst); err != nil { + ch <- false + } else { + t.Error("succeeded in dialing", dst) + ch <- true + c.Close() + } + }() + } + return ch + } + + { + // 1) dial 10x to each node. + N := 10 + s2done := dialOnlineNode(s2.LocalPeer(), N) + s3done := dialOfflineNode(s3p, N) + + // when all dials should be done by: + dialTimeout1x := time.After(transport.DialTimeout) + dialTimeout10Ax := time.After(transport.DialTimeout * 2 * 10) // DialAttempts * 10) + + // 2) all dials should hang + select { + case <-s2done: + t.Error("s2 should not happen immediately") + case <-s3done: + t.Error("s3 should not happen yet") + case <-time.After(time.Millisecond): + // s2 may finish very quickly, so let's get out. + } + + // 3) s1->s2 should succeed. + for i := 0; i < N; i++ { + select { + case r := <-s2done: + if !r { + t.Error("s2 should not fail") + } + case <-s3done: + t.Error("s3 should not happen yet") + case <-dialTimeout1x: + t.Error("s2 took too long") + } + } + + select { + case <-s2done: + t.Error("s2 should have no more") + case <-s3done: + t.Error("s3 should not happen yet") + case <-dialTimeout1x: // let it pass + } + + // 4) s1->s3 should not (and should place s3 on backoff) + // N-1 should finish before dialTimeout1x * 2 + for i := 0; i < N; i++ { + select { + case <-s2done: + t.Error("s2 should have no more") + case r := <-s3done: + if r { + t.Error("s3 should not succeed") + } + case <-(dialTimeout1x): + if i < (N - 1) { + t.Fatal("s3 took too long") + } + t.Log("dialTimeout1x * 1.3 hit for last peer") + case <-dialTimeout10Ax: + t.Fatal("s3 took too long") + } + } + + // check backoff state + if s1.Backoff().Backoff(s2.LocalPeer(), s2addrs[0]) { + t.Error("s2 should not be on backoff") + } + if !s1.Backoff().Backoff(s3p, s3addr) { + t.Error("s3 should be on backoff") + } + + // 5) disconnect entirely + + for _, c := range s1.Conns() { + c.Close() + } + for i := 0; i < 100 && len(s1.Conns()) > 0; i++ { + <-time.After(time.Millisecond) + } + if len(s1.Conns()) > 0 { + t.Fatal("s1 conns must exit") + } + } + + { + // 6) dial 10x to each node again + N := 10 + s2done := dialOnlineNode(s2.LocalPeer(), N) + s3done := dialOfflineNode(s3p, N) + + // when all dials should be done by: + dialTimeout1x := time.After(transport.DialTimeout) + dialTimeout10Ax := time.After(transport.DialTimeout * 2 * 10) // DialAttempts * 10) + + // 7) s3 dials should all return immediately (except 1) + for i := 0; i < N-1; i++ { + select { + case <-s2done: + t.Error("s2 should not succeed yet") + case r := <-s3done: + if r { + t.Error("s3 should not succeed") + } + case <-dialTimeout1x: + t.Fatal("s3 took too long") + } + } + + // 8) s2 dials should all hang, and succeed + for i := 0; i < N; i++ { + select { + case r := <-s2done: + if !r { + t.Error("s2 should succeed") + } + // case <-s3done: + case 
<-(dialTimeout1x): + t.Fatal("s3 took too long") + } + } + + // 9) the last s3 should return, failed. + select { + case <-s2done: + t.Error("s2 should have no more") + case r := <-s3done: + if r { + t.Error("s3 should not succeed") + } + case <-dialTimeout10Ax: + t.Fatal("s3 took too long") + } + + // check backoff state (the same) + if s1.Backoff().Backoff(s2.LocalPeer(), s2addrs[0]) { + t.Error("s2 should not be on backoff") + } + if !s1.Backoff().Backoff(s3p, s3addr) { + t.Error("s3 should be on backoff") + } + } +} + +func TestDialBackoffClears(t *testing.T) { + // t.Skip("skipping for another test") + t.Parallel() + + ctx := context.Background() + swarms := makeSwarms(ctx, t, 2) + s1 := swarms[0] + s2 := swarms[1] + defer s1.Close() + defer s2.Close() + + // use another address first, that accept and hang on conns + _, s2bad, s2l := newSilentPeer(t) + go acceptAndHang(s2l) + defer s2l.Close() + + // phase 1 -- dial to non-operational addresses + s1.Peerstore().AddAddr(s2.LocalPeer(), s2bad, peerstore.PermanentAddrTTL) + + before := time.Now() + c, err := s1.DialPeer(ctx, s2.LocalPeer()) + if err == nil { + defer c.Close() + t.Fatal("dialing to broken addr worked...", err) + } else { + t.Log("correctly got error:", err) + } + duration := time.Since(before) + + if duration < transport.DialTimeout*DialAttempts { + t.Error("< transport.DialTimeout * DialAttempts not being respected", duration, transport.DialTimeout*DialAttempts) + } + if duration > 2*transport.DialTimeout*DialAttempts { + t.Error("> 2*transport.DialTimeout * DialAttempts not being respected", duration, 2*transport.DialTimeout*DialAttempts) + } + + if !s1.Backoff().Backoff(s2.LocalPeer(), s2bad) { + t.Error("s2 should now be on backoff") + } else { + t.Log("correctly added to backoff") + } + + // phase 2 -- add the working address. dial should succeed. + ifaceAddrs1, err := swarms[1].InterfaceListenAddresses() + if err != nil { + t.Fatal(err) + } + s1.Peerstore().AddAddrs(s2.LocalPeer(), ifaceAddrs1, peerstore.PermanentAddrTTL) + + if c, err := s1.DialPeer(ctx, s2.LocalPeer()); err == nil { + c.Close() + t.Log("backoffs are per address, not peer") + } + + time.Sleep(BackoffBase) + + if c, err := s1.DialPeer(ctx, s2.LocalPeer()); err != nil { + t.Fatal(err) + } else { + c.Close() + t.Log("correctly connected") + } + + if s1.Backoff().Backoff(s2.LocalPeer(), s2bad) { + t.Error("s2 should no longer be on backoff") + } else { + t.Log("correctly cleared backoff") + } +} + +func TestDialPeerFailed(t *testing.T) { + t.Parallel() + ctx := context.Background() + + swarms := makeSwarms(ctx, t, 2) + defer closeSwarms(swarms) + testedSwarm, targetSwarm := swarms[0], swarms[1] + + expectedErrorsCount := 5 + for i := 0; i < expectedErrorsCount; i++ { + _, silentPeerAddress, silentPeerListener := newSilentPeer(t) + go acceptAndHang(silentPeerListener) + defer silentPeerListener.Close() + + testedSwarm.Peerstore().AddAddr( + targetSwarm.LocalPeer(), + silentPeerAddress, + peerstore.PermanentAddrTTL) + } + + _, err := testedSwarm.DialPeer(ctx, targetSwarm.LocalPeer()) + if err == nil { + t.Fatal(err) + } + + // dial_test.go:508: correctly get a combined error: failed to dial PEER: all dials failed + // * [/ip4/127.0.0.1/tcp/46485] failed to negotiate security protocol: context deadline exceeded + // * [/ip4/127.0.0.1/tcp/34881] failed to negotiate security protocol: context deadline exceeded + // ... 
+
+	dialErr, ok := err.(*DialError)
+	if !ok {
+		t.Fatalf("expected *DialError, got %T", err)
+	}
+
+	if len(dialErr.DialErrors) != expectedErrorsCount {
+		t.Errorf("expected %d errors, got %d", expectedErrorsCount, len(dialErr.DialErrors))
+	}
+}
+
+func TestDialExistingConnection(t *testing.T) {
+	ctx := context.Background()
+
+	swarms := makeSwarms(ctx, t, 2)
+	defer closeSwarms(swarms)
+	s1 := swarms[0]
+	s2 := swarms[1]
+
+	s1.Peerstore().AddAddrs(s2.LocalPeer(), s2.ListenAddresses(), peerstore.PermanentAddrTTL)
+
+	c1, err := s1.DialPeer(ctx, s2.LocalPeer())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	c2, err := s1.DialPeer(ctx, s2.LocalPeer())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if c1 != c2 {
+		t.Fatal("expecting the same connection from both dials")
+	}
+}
+
+func newSilentListener(t *testing.T) ([]ma.Multiaddr, net.Listener) {
+	lst, err := net.Listen("tcp4", "localhost:0")
+	if err != nil {
+		t.Fatal(err)
+	}
+	addr, err := manet.FromNetAddr(lst.Addr())
+	if err != nil {
+		t.Fatal(err)
+	}
+	addrs := []ma.Multiaddr{addr}
+	addrs, err = addrutil.ResolveUnspecifiedAddresses(addrs, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return addrs, lst
+}
+
+func TestDialSimultaneousJoin(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	swarms := makeSwarms(ctx, t, 2)
+	s1 := swarms[0]
+	s2 := swarms[1]
+	defer s1.Close()
+	defer s2.Close()
+
+	s2silentAddrs, s2silentListener := newSilentListener(t)
+	go acceptAndHang(s2silentListener)
+
+	connch := make(chan network.Conn, 512)
+	errs := make(chan error, 2)
+
+	// start a dial to s2 through the silent addr
+	go func() {
+		s1.Peerstore().AddAddrs(s2.LocalPeer(), s2silentAddrs, peerstore.PermanentAddrTTL)
+
+		c, err := s1.DialPeer(ctx, s2.LocalPeer())
+		if err != nil {
+			errs <- err
+			connch <- nil
+			return
+		}
+
+		t.Logf("first dial succeeded; conn: %+v", c)
+
+		connch <- c
+		errs <- nil
+	}()
+
+	// wait a bit for the dial to take hold
+	time.Sleep(100 * time.Millisecond)
+
+	// start a second dial to s2 that uses the real s2 addrs
+	go func() {
+		s2addrs, err := s2.InterfaceListenAddresses()
+		if err != nil {
+			errs <- err
+			return
+		}
+		s1.Peerstore().AddAddrs(s2.LocalPeer(), s2addrs[:1], peerstore.PermanentAddrTTL)
+
+		c, err := s1.DialPeer(ctx, s2.LocalPeer())
+		if err != nil {
+			errs <- err
+			connch <- nil
+			return
+		}
+
+		t.Logf("second dial succeeded; conn: %+v", c)
+
+		connch <- c
+		errs <- nil
+	}()
+
+	// wait for the second dial to finish
+	c2 := <-connch
+
+	// start a third dial to s2; this should get the existing connection from the successful dial
+	go func() {
+		c, err := s1.DialPeer(ctx, s2.LocalPeer())
+		if err != nil {
+			errs <- err
+			connch <- nil
+			return
+		}
+
+		t.Logf("third dial succeeded; conn: %+v", c)
+
+		connch <- c
+		errs <- nil
+	}()
+
+	c3 := <-connch
+
+	// raise any errors from the previous goroutines
+	for i := 0; i < 3; i++ {
+		err := <-errs
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	if c2 != c3 {
+		t.Fatal("expected c2 and c3 to be the same")
+	}
+
+	// next, the first dial to s2, using the silent addr, should time out; at this point the dial
+	// will error but the last chance check will see the existing connection and return it
+	select {
+	case c1 := <-connch:
+		if c1 != c2 {
+			t.Fatal("expected c1 and c2 to be the same")
+		}
+	case <-time.After(2 * transport.DialTimeout):
+		t.Fatal("no connection from first dial")
+	}
+}
+
+func TestDialSelf2(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	swarms :=
makeSwarms(ctx, t, 2) + s1 := swarms[0] + defer s1.Close() + + _, err := s1.DialPeer(ctx, s1.LocalPeer()) + if err != ErrDialToSelf { + t.Fatal("expected error from self dial") + } +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..09d8a3a3a976d57dfaea3a1c887fd208f1baf01b --- /dev/null +++ b/go.mod @@ -0,0 +1,23 @@ +module github.com/libp2p/go-libp2p-swarm + +go 1.15 + +require ( + github.com/ipfs/go-log v1.0.4 + github.com/jbenet/goprocess v0.1.4 + github.com/libp2p/go-addr-util v0.0.2 + github.com/libp2p/go-conn-security-multistream v0.2.1 + github.com/libp2p/go-libp2p-core v0.8.5 + github.com/libp2p/go-libp2p-peerstore v0.2.6 + github.com/libp2p/go-libp2p-quic-transport v0.10.0 + github.com/libp2p/go-libp2p-testing v0.4.0 + github.com/libp2p/go-libp2p-transport-upgrader v0.4.2 + github.com/libp2p/go-libp2p-yamux v0.5.0 + github.com/libp2p/go-maddr-filter v0.1.0 + github.com/libp2p/go-stream-muxer-multistream v0.3.0 + github.com/libp2p/go-tcp-transport v0.2.0 + github.com/multiformats/go-multiaddr v0.3.1 + github.com/multiformats/go-multiaddr-fmt v0.1.0 + github.com/stretchr/testify v1.6.1 + github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000000000000000000000000000000000000..398807a9a209864c14708939744900b022b280b8 --- /dev/null +++ b/go.sum @@ -0,0 +1,588 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= +github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= 
+github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= +github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018 h1:6xT9KW8zLC5IlbaIF5Q7JNieBoACT7iW0YTxQHR0in0= +github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= +github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= +github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gopacket v1.1.17 h1:rMrlX2ZY2UbvT+sdz3+6J+pp2z+msCq9MxTU6ymxbBY= +github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= +github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= +github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= 
+github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= +github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= +github.com/ipfs/go-log v1.0.4 h1:6nLQdX4W8P9yZZFH7mO+X/PzjN8Laozm/lMJ6esdgzY= +github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= +github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= +github.com/ipfs/go-log/v2 v2.1.1 h1:G4TtqN+V9y9HY9TA6BwbCVyyBZ2B9MbCjR2MtGx8FR0= +github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= +github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= +github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text 
v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/libp2p/go-addr-util v0.0.2 h1:7cWK5cdA5x72jX0g8iLrQWm5TRJZ6CzGdPEhWj7plWU= +github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= +github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= +github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= +github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-conn-security-multistream v0.2.1 h1:ft6/POSK7F+vl/2qzegnHDaXFU0iWB4yVTYrioC6Zy0= +github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= +github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= +github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM= +github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= +github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= +github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= +github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= +github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= +github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= +github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= +github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.8.5 h1:aEgbIcPGsKy6zYcC+5AJivYFedhYa4sW7mIpWpUaLKw= +github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= +github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= +github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aDN3Fx1sc= +github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= +github.com/libp2p/go-libp2p-peerstore v0.2.6 h1:2ACefBX23iMdJU9Ke+dcXt3w86MIryes9v7In4+Qq3U= +github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= +github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= +github.com/libp2p/go-libp2p-quic-transport v0.10.0 h1:koDCbWD9CCHwcHZL3/WEvP2A+e/o5/W5L3QS/2SPMA0= +github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= +github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod 
h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= +github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= +github.com/libp2p/go-libp2p-testing v0.4.0 h1:PrwHRi0IGqOwVQWR3xzgigSlhlLfxgfXgkHxr77EghQ= +github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-tls v0.1.3 h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM= +github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= +github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.2 h1:4JsnbfJzgZeRS9AWN7B9dPqn/LY/HoQTlO9gtdJTIYM= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= +github.com/libp2p/go-libp2p-yamux v0.5.0 h1:ZzmUhbQE+X7NuYUT2naxN31JyebZfRmpZVhKtRP13ys= +github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= +github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= +github.com/libp2p/go-maddr-filter v0.1.0 h1:4ACqZKw8AqiuJfwFGq1CYDFugfXTOos+qQ3DETkhtCE= +github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= +github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= +github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= +github.com/libp2p/go-mplex v0.3.0 h1:U1T+vmCYJaEoDJPV1aq31N56hS+lJgb397GsylNSgrU= +github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= +github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.6 h1:lQ7Uc0kS1wb1EfRxO2Eir/RJoHkHn7t6o+EiwsYIKJA= +github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= +github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.1.3 h1:1ngWRx61us/EpaKkdqkMjKk/ufr/JlIFYQAxV2XX8Ig= +github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.7 h1:eCAzdLejcNVBzP/iZM9vqHnQm+XyCEbSSIheIPRGNsw= +github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw= +github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= +github.com/libp2p/go-reuseport-transport v0.0.3 h1:zzOeXnTooCkRvoH+bSXEfXhn76+LAiwoneM0gnXjF2M= +github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= +github.com/libp2p/go-sockaddr v0.0.2 h1:tCuXfpA9rq7llM/v834RKc/Xvovy/AqM9kHvTV/jY/Q= +github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= +github.com/libp2p/go-stream-muxer-multistream v0.3.0 h1:TqnSHPJEIqDEO7h1wZZ0p3DXdvDSiLHQidKKUGZtiOY= +github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= +github.com/libp2p/go-tcp-transport v0.2.0 h1:YoThc549fzmNJIh7XjHVtMIFaEDRtIrtWciG5LyYAPo= +github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= +github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= +github.com/libp2p/go-yamux 
v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/lucas-clemente/quic-go v0.19.3 h1:eCDQqvGBB+kCTkA0XrAFtNe81FMa0/fn4QSoeAbmiF4= +github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= +github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= +github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc= +github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= +github.com/marten-seemann/qtls-go1-15 v0.1.1 h1:LIH6K34bPVttyXnUWixk0bzH6/N07VxbSabxn5A5gZQ= +github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= 
+github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= +github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= +github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= +github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= +github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= +github.com/multiformats/go-multiaddr v0.3.1 h1:1bxa+W7j9wZKTZREySx1vPMs2TqrYWjVZ7zE6/XLG1I= +github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= +github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= +github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= +github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.2.0 h1:MSXRGN0mFymt6B1yo/6BPnIRpLPEnKgQNvVfCX5VDJk= +github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= +github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I= +github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= +github.com/multiformats/go-multistream v0.2.1 h1:R5exp4cKvGlePuxg/bn4cnV53K4DxCe+uldxs7QzfrE= +github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-varint v0.0.1/go.mod 
h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= +github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.5.2/go.mod 
h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= +github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= +github.com/spacemonkeygo/spacelog 
v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= +github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod 
h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo= +go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de 
h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11 h1:Yq9t9jnGoR+dBuitxdo9l6Q7xh/zOyNnYUtDKaQ3x0E= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.28.1/go.mod 
h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= +gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= 
+sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
+sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/limiter.go b/limiter.go
new file mode 100644
index 0000000000000000000000000000000000000000..3e20976b321622e1c81dd5bd6e3fb880e08b7897
--- /dev/null
+++ b/limiter.go
@@ -0,0 +1,238 @@
+package swarm
+
+import (
+	"context"
+	"os"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/libp2p/go-libp2p-core/peer"
+	"github.com/libp2p/go-libp2p-core/transport"
+
+	ma "github.com/multiformats/go-multiaddr"
+)
+
+type dialResult struct {
+	Conn transport.CapableConn
+	Addr ma.Multiaddr
+	Err  error
+}
+
+type dialJob struct {
+	addr ma.Multiaddr
+	peer peer.ID
+	ctx  context.Context
+	resp chan dialResult
+}
+
+func (dj *dialJob) cancelled() bool {
+	return dj.ctx.Err() != nil
+}
+
+func (dj *dialJob) dialTimeout() time.Duration {
+	timeout := transport.DialTimeout
+	if lowTimeoutFilters.AddrBlocked(dj.addr) {
+		timeout = DialTimeoutLocal
+	}
+
+	return timeout
+}
+
+type dialLimiter struct {
+	lk sync.Mutex
+
+	isFdConsumingFnc isFdConsumingFnc
+	fdConsuming      int
+	fdLimit          int
+	waitingOnFd      []*dialJob
+
+	dialFunc dialfunc
+
+	activePerPeer      map[peer.ID]int
+	perPeerLimit       int
+	waitingOnPeerLimit map[peer.ID][]*dialJob
+}
+
+type dialfunc func(context.Context, peer.ID, ma.Multiaddr) (transport.CapableConn, error)
+type isFdConsumingFnc func(ma.Multiaddr) bool
+
+func newDialLimiter(df dialfunc, fdFnc isFdConsumingFnc) *dialLimiter {
+	fd := ConcurrentFdDials
+	if env := os.Getenv("LIBP2P_SWARM_FD_LIMIT"); env != "" {
+		if n, err := strconv.ParseInt(env, 10, 32); err == nil {
+			fd = int(n)
+		}
+	}
+	return newDialLimiterWithParams(fdFnc, df, fd, DefaultPerPeerRateLimit)
+}
+
+func newDialLimiterWithParams(isFdConsumingFnc isFdConsumingFnc, df dialfunc, fdLimit, perPeerLimit int) *dialLimiter {
+	return &dialLimiter{
+		isFdConsumingFnc:   isFdConsumingFnc,
+		fdLimit:            fdLimit,
+		perPeerLimit:       perPeerLimit,
+		waitingOnPeerLimit: make(map[peer.ID][]*dialJob),
+		activePerPeer:      make(map[peer.ID]int),
+		dialFunc:           df,
+	}
+}
+
+// freeFDToken frees an FD token and, if any dial jobs are waiting on one,
+// schedules the next of them in its place.
+func (dl *dialLimiter) freeFDToken() {
+	log.Debugf("[limiter] freeing FD token; waiting: %d; consuming: %d", len(dl.waitingOnFd), dl.fdConsuming)
+	dl.fdConsuming--
+
+	for len(dl.waitingOnFd) > 0 {
+		next := dl.waitingOnFd[0]
+		dl.waitingOnFd[0] = nil // clear out memory
+		dl.waitingOnFd = dl.waitingOnFd[1:]
+
+		if len(dl.waitingOnFd) == 0 {
+			// clear out memory.
+			dl.waitingOnFd = nil
+		}
+
+		// Skip over canceled dials instead of queuing up a goroutine.
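+		// (a cancelled job still holds the per-peer token it took in
+		// addCheckPeerLimit, so release that token before moving on)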
+		if next.cancelled() {
+			dl.freePeerToken(next)
+			continue
+		}
+		dl.fdConsuming++
+
+		// we already hold an activePerPeer token at this point, so we can dial right away
+		go dl.executeDial(next)
+		return
+	}
+}
+
+func (dl *dialLimiter) freePeerToken(dj *dialJob) {
+	log.Debugf("[limiter] freeing peer token; peer %s; addr: %s; active for peer: %d; waiting on peer limit: %d",
+		dj.peer, dj.addr, dl.activePerPeer[dj.peer], len(dl.waitingOnPeerLimit[dj.peer]))
+	// release tokens in the reverse order in which we took them
+	dl.activePerPeer[dj.peer]--
+	if dl.activePerPeer[dj.peer] == 0 {
+		delete(dl.activePerPeer, dj.peer)
+	}
+
+	waitlist := dl.waitingOnPeerLimit[dj.peer]
+	for len(waitlist) > 0 {
+		next := waitlist[0]
+		waitlist[0] = nil // clear out memory
+		waitlist = waitlist[1:]
+
+		if len(waitlist) == 0 {
+			delete(dl.waitingOnPeerLimit, next.peer)
+		} else {
+			dl.waitingOnPeerLimit[next.peer] = waitlist
+		}
+
+		if next.cancelled() {
+			continue
+		}
+
+		dl.activePerPeer[next.peer]++ // hand the freed peer token straight to the next waiting job
+
+		dl.addCheckFdLimit(next)
+		return
+	}
+}
+
+func (dl *dialLimiter) finishedDial(dj *dialJob) {
+	dl.lk.Lock()
+	defer dl.lk.Unlock()
+	if dl.shouldConsumeFd(dj.addr) {
+		dl.freeFDToken()
+	}
+
+	dl.freePeerToken(dj)
+}
+
+func (dl *dialLimiter) shouldConsumeFd(addr ma.Multiaddr) bool {
+	// we don't consume FDs for relay addresses for now as they will be consumed when the Relay Transport
+	// actually dials the Relay server. That dial call will also pass through this limiter with
+	// the address of the relay server, i.e. a non-relay address.
+	_, err := addr.ValueForProtocol(ma.P_CIRCUIT)
+
+	isRelay := err == nil
+
+	return !isRelay && dl.isFdConsumingFnc(addr)
+}
+
+func (dl *dialLimiter) addCheckFdLimit(dj *dialJob) {
+	if dl.shouldConsumeFd(dj.addr) {
+		if dl.fdConsuming >= dl.fdLimit {
+			log.Debugf("[limiter] blocked dial waiting on FD token; peer: %s; addr: %s; consuming: %d; "+
+				"limit: %d; waiting: %d", dj.peer, dj.addr, dl.fdConsuming, dl.fdLimit, len(dl.waitingOnFd))
+			dl.waitingOnFd = append(dl.waitingOnFd, dj)
+			return
+		}
+
+		log.Debugf("[limiter] taking FD token: peer: %s; addr: %s; prev consuming: %d",
+			dj.peer, dj.addr, dl.fdConsuming)
+		// take token
+		dl.fdConsuming++
+	}
+
+	log.Debugf("[limiter] executing dial; peer: %s; addr: %s; FD consuming: %d; waiting: %d",
+		dj.peer, dj.addr, dl.fdConsuming, len(dl.waitingOnFd))
+	go dl.executeDial(dj)
+}
+
+func (dl *dialLimiter) addCheckPeerLimit(dj *dialJob) {
+	if dl.activePerPeer[dj.peer] >= dl.perPeerLimit {
+		log.Debugf("[limiter] blocked dial waiting on peer limit; peer: %s; addr: %s; active: %d; "+
+			"peer limit: %d; waiting: %d", dj.peer, dj.addr, dl.activePerPeer[dj.peer], dl.perPeerLimit,
+			len(dl.waitingOnPeerLimit[dj.peer]))
+		wlist := dl.waitingOnPeerLimit[dj.peer]
+		dl.waitingOnPeerLimit[dj.peer] = append(wlist, dj)
+		return
+	}
+	dl.activePerPeer[dj.peer]++
+
+	dl.addCheckFdLimit(dj)
+}
+
+// AddDialJob tries to take the needed tokens for starting the given dial job.
+// If it acquires all needed tokens, it immediately starts the dial; otherwise
+// it puts the job on the waitlist for the requested token.
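+//
+// Tokens are taken in a fixed order (the per-peer token first, then the FD
+// token) and released in the reverse order by finishedDial, so a job that is
+// waiting on an FD token always already holds its per-peer token.
+//
+// A minimal usage sketch (mirroring tryDialAddrs in limiter_test.go):
+//
+//	resp := make(chan dialResult)
+//	dl.AddDialJob(&dialJob{ctx: ctx, peer: p, addr: a, resp: resp})
+//	res := <-resp // res.Err is non-nil if the dial failed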
+func (dl *dialLimiter) AddDialJob(dj *dialJob) { + dl.lk.Lock() + defer dl.lk.Unlock() + + log.Debugf("[limiter] adding a dial job through limiter: %v", dj.addr) + dl.addCheckPeerLimit(dj) +} + +func (dl *dialLimiter) clearAllPeerDials(p peer.ID) { + dl.lk.Lock() + defer dl.lk.Unlock() + delete(dl.waitingOnPeerLimit, p) + log.Debugf("[limiter] clearing all peer dials: %v", p) + // NB: the waitingOnFd list doesn't need to be cleaned out here, we will + // remove them as we encounter them because they are 'cancelled' at this + // point +} + +// executeDial calls the dialFunc, and reports the result through the response +// channel when finished. Once the response is sent it also releases all tokens +// it held during the dial. +func (dl *dialLimiter) executeDial(j *dialJob) { + defer dl.finishedDial(j) + if j.cancelled() { + return + } + + dctx, cancel := context.WithTimeout(j.ctx, j.dialTimeout()) + defer cancel() + + con, err := dl.dialFunc(dctx, j.peer, j.addr) + select { + case j.resp <- dialResult{Conn: con, Addr: j.addr, Err: err}: + case <-j.ctx.Done(): + if err == nil { + con.Close() + } + } +} diff --git a/limiter_test.go b/limiter_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1aefffec6d2acb143aa4dc36ab7fda5568730ee2 --- /dev/null +++ b/limiter_test.go @@ -0,0 +1,420 @@ +package swarm + +import ( + "context" + "errors" + "fmt" + "math/rand" + "strconv" + "sync" + "testing" + "time" + + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/test" + "github.com/libp2p/go-libp2p-core/transport" + + ma "github.com/multiformats/go-multiaddr" + mafmt "github.com/multiformats/go-multiaddr-fmt" +) + +var isFdConsuming = func(addr ma.Multiaddr) bool { + res := false + + ma.ForEach(addr, func(c ma.Component) bool { + if c.Protocol().Code == ma.P_TCP { + res = true + return false + } + return true + }) + return res +} + +func mustAddr(t *testing.T, s string) ma.Multiaddr { + a, err := ma.NewMultiaddr(s) + if err != nil { + t.Fatal(err) + } + return a +} + +func addrWithPort(t *testing.T, p int) ma.Multiaddr { + return mustAddr(t, fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", p)) +} + +// in these tests I use addresses with tcp ports over a certain number to +// signify 'good' addresses that will succeed, and addresses below that number +// will fail. This lets us more easily test these different scenarios. 
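+// For example, with a cutoff of 10 (as in hangDialFunc), a dial to
+// /ip4/127.0.0.1/tcp/5 hangs and then fails, while a dial to
+// /ip4/127.0.0.1/tcp/20 succeeds immediately.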
+func tcpPortOver(a ma.Multiaddr, n int) bool {
+	port, err := a.ValueForProtocol(ma.P_TCP)
+	if err != nil {
+		panic(err)
+	}
+
+	pnum, err := strconv.Atoi(port)
+	if err != nil {
+		panic(err)
+	}
+
+	return pnum > n
+}
+
+func tryDialAddrs(ctx context.Context, l *dialLimiter, p peer.ID, addrs []ma.Multiaddr, res chan dialResult) {
+	for _, a := range addrs {
+		l.AddDialJob(&dialJob{
+			ctx:  ctx,
+			peer: p,
+			addr: a,
+			resp: res,
+		})
+	}
+}
+
+func hangDialFunc(hang chan struct{}) dialfunc {
+	return func(ctx context.Context, p peer.ID, a ma.Multiaddr) (transport.CapableConn, error) {
+		if mafmt.UTP.Matches(a) {
+			return transport.CapableConn(nil), nil
+		}
+
+		_, err := a.ValueForProtocol(ma.P_CIRCUIT)
+		if err == nil {
+			return transport.CapableConn(nil), nil
+		}
+
+		if tcpPortOver(a, 10) {
+			return transport.CapableConn(nil), nil
+		}
+
+		<-hang
+		return nil, fmt.Errorf("test bad dial")
+	}
+}
+
+func TestLimiterBasicDials(t *testing.T) {
+	hang := make(chan struct{})
+	defer close(hang)
+
+	l := newDialLimiterWithParams(isFdConsuming, hangDialFunc(hang), ConcurrentFdDials, 4)
+
+	bads := []ma.Multiaddr{addrWithPort(t, 1), addrWithPort(t, 2), addrWithPort(t, 3), addrWithPort(t, 4)}
+	good := addrWithPort(t, 20)
+
+	resch := make(chan dialResult)
+	pid := peer.ID("testpeer")
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	tryDialAddrs(ctx, l, pid, bads, resch)
+
+	l.AddDialJob(&dialJob{
+		ctx:  ctx,
+		peer: pid,
+		addr: good,
+		resp: resch,
+	})
+
+	select {
+	case <-resch:
+		t.Fatal("no dials should have completed!")
+	case <-time.After(time.Millisecond * 100):
+	}
+
+	// complete a single hung dial
+	hang <- struct{}{}
+
+	select {
+	case r := <-resch:
+		if r.Err == nil {
+			t.Fatal("should have gotten failed dial result")
+		}
+	case <-time.After(time.Second):
+		t.Fatal("timed out waiting for dial completion")
+	}
+
+	select {
+	case r := <-resch:
+		if r.Err != nil {
+			t.Fatal("expected second result to be success!")
+		}
+	case <-time.After(time.Second):
+	}
+}
+
+func TestFDLimiting(t *testing.T) {
+	hang := make(chan struct{})
+	defer close(hang)
+	l := newDialLimiterWithParams(isFdConsuming, hangDialFunc(hang), 16, 5)
+
+	bads := []ma.Multiaddr{addrWithPort(t, 1), addrWithPort(t, 2), addrWithPort(t, 3), addrWithPort(t, 4)}
+	pids := []peer.ID{"testpeer1", "testpeer2", "testpeer3", "testpeer4"}
+	goodTCP := addrWithPort(t, 20)
+
+	ctx := context.Background()
+	resch := make(chan dialResult)
+
+	// take all FD limit tokens with hanging dials
+	for _, pid := range pids {
+		tryDialAddrs(ctx, l, pid, bads, resch)
+	}
+
+	// these dials should work normally, but will hang because we have used
+	// up all of the FD tokens
+	for _, pid := range pids {
+		l.AddDialJob(&dialJob{
+			ctx:  ctx,
+			peer: pid,
+			addr: goodTCP,
+			resp: resch,
+		})
+	}
+
+	select {
+	case <-resch:
+		t.Fatal("no dials should have completed!")
+	case <-time.After(time.Millisecond * 100):
+	}
+
+	pid5 := peer.ID("testpeer5")
+	utpaddr := mustAddr(t, "/ip4/127.0.0.1/udp/7777/utp")
+
+	// This should complete immediately, since UTP addresses aren't blocked by FD rate limiting
+	l.AddDialJob(&dialJob{ctx: ctx, peer: pid5, addr: utpaddr, resp: resch})
+
+	select {
+	case res := <-resch:
+		if res.Err != nil {
+			t.Fatal("should have gotten successful response")
+		}
+	case <-time.After(time.Second * 5):
+		t.Fatal("timeout waiting for utp addr success")
+	}
+
+	// A relay address with tcp transport will complete because we do not consume FDs for dials
+	// with relay addresses, as the FD will be consumed
when we actually dial the relay server. + pid6 := test.RandPeerIDFatal(t) + relayAddr := mustAddr(t, fmt.Sprintf("/ip4/127.0.0.1/tcp/20/p2p-circuit/p2p/%s", pid6)) + l.AddDialJob(&dialJob{ctx: ctx, peer: pid6, addr: relayAddr, resp: resch}) + + select { + case res := <-resch: + if res.Err != nil { + t.Fatal("should have gotten successful response") + } + case <-time.After(time.Second * 5): + t.Fatal("timeout waiting for relay addr success") + } +} + +func TestTokenRedistribution(t *testing.T) { + var lk sync.Mutex + hangchs := make(map[peer.ID]chan struct{}) + df := func(ctx context.Context, p peer.ID, a ma.Multiaddr) (transport.CapableConn, error) { + if tcpPortOver(a, 10) { + return (transport.CapableConn)(nil), nil + } + + lk.Lock() + ch := hangchs[p] + lk.Unlock() + <-ch + return nil, fmt.Errorf("test bad dial") + } + l := newDialLimiterWithParams(isFdConsuming, df, 8, 4) + + bads := []ma.Multiaddr{addrWithPort(t, 1), addrWithPort(t, 2), addrWithPort(t, 3), addrWithPort(t, 4)} + pids := []peer.ID{"testpeer1", "testpeer2"} + + ctx := context.Background() + resch := make(chan dialResult) + + // take all fd limit tokens with hang dials + for _, pid := range pids { + hangchs[pid] = make(chan struct{}) + } + + for _, pid := range pids { + tryDialAddrs(ctx, l, pid, bads, resch) + } + + good := mustAddr(t, "/ip4/127.0.0.1/tcp/1001") + + // add a good dial job for peer 1 + l.AddDialJob(&dialJob{ + ctx: ctx, + peer: pids[1], + addr: good, + resp: resch, + }) + + select { + case <-resch: + t.Fatal("no dials should have completed!") + case <-time.After(time.Millisecond * 100): + } + + // unblock one dial for peer 0 + hangchs[pids[0]] <- struct{}{} + + select { + case res := <-resch: + if res.Err == nil { + t.Fatal("should have only been a failure here") + } + case <-time.After(time.Millisecond * 100): + t.Fatal("expected a dial failure here") + } + + select { + case <-resch: + t.Fatal("no more dials should have completed!") + case <-time.After(time.Millisecond * 100): + } + + // add a bad dial job to peer 0 to fill their rate limiter + // and test that more dials for this peer won't interfere with peer 1's successful dial incoming + l.AddDialJob(&dialJob{ + ctx: ctx, + peer: pids[0], + addr: addrWithPort(t, 7), + resp: resch, + }) + + hangchs[pids[1]] <- struct{}{} + + // now one failed dial from peer 1 should get through and fail + // which will in turn unblock the successful dial on peer 1 + select { + case res := <-resch: + if res.Err == nil { + t.Fatal("should have only been a failure here") + } + case <-time.After(time.Millisecond * 100): + t.Fatal("expected a dial failure here") + } + + select { + case res := <-resch: + if res.Err != nil { + t.Fatal("should have succeeded!") + } + case <-time.After(time.Millisecond * 100): + t.Fatal("should have gotten successful dial") + } +} + +func TestStressLimiter(t *testing.T) { + df := func(ctx context.Context, p peer.ID, a ma.Multiaddr) (transport.CapableConn, error) { + if tcpPortOver(a, 1000) { + return transport.CapableConn(nil), nil + } + + time.Sleep(time.Millisecond * time.Duration(5+rand.Intn(100))) + return nil, fmt.Errorf("test bad dial") + } + + l := newDialLimiterWithParams(isFdConsuming, df, 20, 5) + + var bads []ma.Multiaddr + for i := 0; i < 100; i++ { + bads = append(bads, addrWithPort(t, i)) + } + + addresses := append(bads, addrWithPort(t, 2000)) + success := make(chan struct{}) + + for i := 0; i < 20; i++ { + go func(id peer.ID) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resp := make(chan 
dialResult) + time.Sleep(time.Duration(rand.Intn(10)) * time.Millisecond) + for _, i := range rand.Perm(len(addresses)) { + l.AddDialJob(&dialJob{ + addr: addresses[i], + ctx: ctx, + peer: id, + resp: resp, + }) + } + + for res := range resp { + if res.Err == nil { + success <- struct{}{} + return + } + } + }(peer.ID(fmt.Sprintf("testpeer%d", i))) + } + + for i := 0; i < 20; i++ { + select { + case <-success: + case <-time.After(time.Second * 5): + t.Fatal("expected a success within five seconds") + } + } +} + +func TestFDLimitUnderflow(t *testing.T) { + df := func(ctx context.Context, p peer.ID, addr ma.Multiaddr) (transport.CapableConn, error) { + select { + case <-ctx.Done(): + case <-time.After(5 * time.Second): + } + return nil, fmt.Errorf("df timed out") + } + + const fdLimit = 20 + l := newDialLimiterWithParams(isFdConsuming, df, fdLimit, 3) + + var addrs []ma.Multiaddr + for i := 0; i <= 1000; i++ { + addrs = append(addrs, addrWithPort(t, i)) + } + + wg := sync.WaitGroup{} + const num = 3 * fdLimit + wg.Add(num) + errs := make(chan error, num) + for i := 0; i < num; i++ { + go func(id peer.ID, i int) { + defer wg.Done() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resp := make(chan dialResult) + l.AddDialJob(&dialJob{ + addr: addrs[i], + ctx: ctx, + peer: id, + resp: resp, + }) + + for res := range resp { + if res.Err != nil { + return + } + errs <- errors.New("got dial res, but shouldn't") + } + }(peer.ID(fmt.Sprintf("testpeer%d", i%20)), i) + } + + go func() { + wg.Wait() + close(errs) + }() + + for err := range errs { + t.Fatal(err) + } + + l.lk.Lock() + fdConsuming := l.fdConsuming + l.lk.Unlock() + + if fdConsuming < 0 { + t.Fatalf("l.fdConsuming < 0") + } +} diff --git a/peers_test.go b/peers_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8e82bf5bde14372cee18e995e4c52f466d799d94 --- /dev/null +++ b/peers_test.go @@ -0,0 +1,72 @@ +package swarm_test + +import ( + "context" + "testing" + + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/peerstore" + + ma "github.com/multiformats/go-multiaddr" + + . "github.com/libp2p/go-libp2p-swarm" +) + +func TestPeers(t *testing.T) { + ctx := context.Background() + swarms := makeSwarms(ctx, t, 2) + s1 := swarms[0] + s2 := swarms[1] + + connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) { + // TODO: make a DialAddr func. + s.Peerstore().AddAddr(dst, addr, peerstore.PermanentAddrTTL) + // t.Logf("connections from %s", s.LocalPeer()) + // for _, c := range s.ConnsToPeer(dst) { + // t.Logf("connection from %s to %s: %v", s.LocalPeer(), dst, c) + // } + // t.Logf("") + if _, err := s.DialPeer(ctx, dst); err != nil { + t.Fatal("error swarm dialing to peer", err) + } + // t.Log(s.swarm.Dump()) + } + + s1GotConn := make(chan struct{}) + s2GotConn := make(chan struct{}) + s1.SetConnHandler(func(c network.Conn) { + s1GotConn <- struct{}{} + }) + s2.SetConnHandler(func(c network.Conn) { + s2GotConn <- struct{}{} + }) + + connect(s1, s2.LocalPeer(), s2.ListenAddresses()[0]) + <-s2GotConn // have to wait here so the other side catches up. 
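Connection notifications fire asynchronously, which is why the test blocks on `s2GotConn` before dialing back in the other direction. A self-contained sketch of that synchronization idiom, built only from helpers this repository already ships (`swarmt.GenSwarm`); the test name is illustrative:

```go
package swarm_test

import (
	"context"
	"testing"
	"time"

	"github.com/libp2p/go-libp2p-core/network"

	swarmt "github.com/libp2p/go-libp2p-swarm/testing"
)

// Sketch: block on a ConnHandler notification so the remote swarm has
// registered the inbound connection before the test proceeds.
func TestWaitForRemoteConn(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s1 := swarmt.GenSwarm(t, ctx)
	s2 := swarmt.GenSwarm(t, ctx)
	defer s1.Close()
	defer s2.Close()

	gotConn := make(chan struct{}, 1)
	s2.SetConnHandler(func(c network.Conn) {
		select {
		case gotConn <- struct{}{}:
		default: // never block the swarm's notification goroutine
		}
	})

	s1.Peerstore().AddAddrs(s2.LocalPeer(), s2.ListenAddresses(), time.Hour)
	if _, err := s1.DialPeer(ctx, s2.LocalPeer()); err != nil {
		t.Fatal(err)
	}

	select {
	case <-gotConn: // the remote side has caught up
	case <-time.After(5 * time.Second):
		t.Fatal("remote swarm never saw the connection")
	}
}
```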
+ connect(s2, s1.LocalPeer(), s1.ListenAddresses()[0]) + + for i := 0; i < 100; i++ { + connect(s1, s2.LocalPeer(), s2.ListenAddresses()[0]) + connect(s2, s1.LocalPeer(), s1.ListenAddresses()[0]) + } + + for _, s := range swarms { + log.Infof("%s swarm routing table: %s", s.LocalPeer(), s.Peers()) + } + + test := func(s *Swarm) { + expect := 1 + actual := len(s.Peers()) + if actual != expect { + t.Errorf("%s has %d peers, not %d: %v", s.LocalPeer(), actual, expect, s.Peers()) + } + actual = len(s.Conns()) + if actual != expect { + t.Errorf("%s has %d conns, not %d: %v", s.LocalPeer(), actual, expect, s.Conns()) + } + } + + test(s1) + test(s2) +} diff --git a/simul_test.go b/simul_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0373e37dfb52c3c32262dc27b257ac9510fc15fd --- /dev/null +++ b/simul_test.go @@ -0,0 +1,81 @@ +package swarm_test + +import ( + "context" + "runtime" + "sync" + "testing" + "time" + + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/peerstore" + + ma "github.com/multiformats/go-multiaddr" + + . "github.com/libp2p/go-libp2p-swarm" + swarmt "github.com/libp2p/go-libp2p-swarm/testing" + "github.com/libp2p/go-libp2p-testing/ci" +) + +func TestSimultOpen(t *testing.T) { + + t.Parallel() + + ctx := context.Background() + swarms := makeSwarms(ctx, t, 2, swarmt.OptDisableReuseport) + + // connect everyone + { + var wg sync.WaitGroup + connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) { + defer wg.Done() + // copy for other peer + log.Debugf("TestSimultOpen: connecting: %s --> %s (%s)", s.LocalPeer(), dst, addr) + s.Peerstore().AddAddr(dst, addr, peerstore.PermanentAddrTTL) + if _, err := s.DialPeer(ctx, dst); err != nil { + t.Error("error swarm dialing to peer", err) + } + } + + log.Info("Connecting swarms simultaneously.") + wg.Add(2) + go connect(swarms[0], swarms[1].LocalPeer(), swarms[1].ListenAddresses()[0]) + go connect(swarms[1], swarms[0].LocalPeer(), swarms[0].ListenAddresses()[0]) + wg.Wait() + } + + for _, s := range swarms { + s.Close() + } +} + +func TestSimultOpenMany(t *testing.T) { + // t.Skip("very very slow") + + addrs := 20 + rounds := 10 + if ci.IsRunning() || runtime.GOOS == "darwin" { + // osx has a limit of 256 file descriptors + addrs = 10 + rounds = 5 + } + SubtestSwarm(t, addrs, rounds) +} + +func TestSimultOpenFewStress(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + // t.Skip("skipping for another test") + t.Parallel() + + msgs := 40 + swarms := 2 + rounds := 10 + // rounds := 100 + + for i := 0; i < rounds; i++ { + SubtestSwarm(t, swarms, msgs) + <-time.After(10 * time.Millisecond) + } +} diff --git a/swarm.go b/swarm.go new file mode 100644 index 0000000000000000000000000000000000000000..e975548fe1f8a3dd3399f806b4dd172b5da4f2ac --- /dev/null +++ b/swarm.go @@ -0,0 +1,601 @@ +package swarm + +import ( + "context" + "errors" + "fmt" + "io" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/libp2p/go-libp2p-core/connmgr" + "github.com/libp2p/go-libp2p-core/metrics" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/peerstore" + "github.com/libp2p/go-libp2p-core/transport" + + logging "github.com/ipfs/go-log" + "github.com/jbenet/goprocess" + goprocessctx "github.com/jbenet/goprocess/context" + + ma "github.com/multiformats/go-multiaddr" +) + +// DialTimeoutLocal is the maximum duration a Dial to local network address +// is allowed to take. 
+// This includes the time between dialing the raw network connection, +// protocol selection as well the handshake, if applicable. +var DialTimeoutLocal = 5 * time.Second + +var log = logging.Logger("swarm2") + +// ErrSwarmClosed is returned when one attempts to operate on a closed swarm. +var ErrSwarmClosed = errors.New("swarm closed") + +// ErrAddrFiltered is returned when trying to register a connection to a +// filtered address. You shouldn't see this error unless some underlying +// transport is misbehaving. +var ErrAddrFiltered = errors.New("address filtered") + +// ErrDialTimeout is returned when one a dial times out due to the global timeout +var ErrDialTimeout = errors.New("dial timed out") + +// Swarm is a connection muxer, allowing connections to other peers to +// be opened and closed, while still using the same Chan for all +// communication. The Chan sends/receives Messages, which note the +// destination or source Peer. +type Swarm struct { + nextConnID uint64 // guarded by atomic + nextStreamID uint64 // guarded by atomic + + // Close refcount. This allows us to fully wait for the swarm to be torn + // down before continuing. + refs sync.WaitGroup + + local peer.ID + peers peerstore.Peerstore + + conns struct { + sync.RWMutex + m map[peer.ID][]*Conn + } + + listeners struct { + sync.RWMutex + + ifaceListenAddres []ma.Multiaddr + cacheEOL time.Time + + m map[transport.Listener]struct{} + } + + notifs struct { + sync.RWMutex + m map[network.Notifiee]struct{} + } + + transports struct { + sync.RWMutex + m map[int]transport.Transport + } + + // new connection and stream handlers + connh atomic.Value + streamh atomic.Value + + // dialing helpers + dsync *DialSync + backf DialBackoff + limiter *dialLimiter + gater connmgr.ConnectionGater + + proc goprocess.Process + ctx context.Context + bwc metrics.Reporter +} + +// NewSwarm constructs a Swarm. +// +// NOTE: go-libp2p will be moving to dependency injection soon. The variadic +// `extra` interface{} parameter facilitates the future migration. Supported +// elements are: +// - connmgr.ConnectionGater +func NewSwarm(ctx context.Context, local peer.ID, peers peerstore.Peerstore, bwc metrics.Reporter, extra ...interface{}) *Swarm { + s := &Swarm{ + local: local, + peers: peers, + bwc: bwc, + } + + s.conns.m = make(map[peer.ID][]*Conn) + s.listeners.m = make(map[transport.Listener]struct{}) + s.transports.m = make(map[int]transport.Transport) + s.notifs.m = make(map[network.Notifiee]struct{}) + + for _, i := range extra { + switch v := i.(type) { + case connmgr.ConnectionGater: + s.gater = v + } + } + + s.dsync = newDialSync(s.startDialWorker) + s.limiter = newDialLimiter(s.dialAddr, isFdConsumingAddr) + s.proc = goprocessctx.WithContext(ctx) + s.ctx = goprocessctx.OnClosingContext(s.proc) + s.backf.init(s.ctx) + + // Set teardown after setting the context/process so we don't start the + // teardown process early. + s.proc.SetTeardown(s.teardown) + + return s +} + +func (s *Swarm) teardown() error { + // Wait for the context to be canceled. + // This allows other parts of the swarm to detect that we're shutting + // down. + <-s.ctx.Done() + + // Prevents new connections and/or listeners from being added to the swarm. + + s.listeners.Lock() + listeners := s.listeners.m + s.listeners.m = nil + s.listeners.Unlock() + + s.conns.Lock() + conns := s.conns.m + s.conns.m = nil + s.conns.Unlock() + + // Lots of goroutines but we might as well do this in parallel. We want to shut down as fast as + // possible. 
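The teardown closes every listener and connection in its own goroutine and then waits on a refcount. The same fan-out/fan-in shutdown shape, condensed into a standalone sketch:

```go
package main

import (
	"fmt"
	"io"
	"sync"
)

type noisyCloser string

func (n noisyCloser) Close() error {
	fmt.Println("closed", string(n))
	return nil
}

// closeAllParallel closes every resource concurrently and returns only
// once all of them are torn down.
func closeAllParallel(closers []io.Closer) {
	var wg sync.WaitGroup
	for _, c := range closers {
		wg.Add(1)
		go func(c io.Closer) {
			defer wg.Done()
			if err := c.Close(); err != nil {
				fmt.Println("close error:", err)
			}
		}(c)
	}
	wg.Wait()
}

func main() {
	closeAllParallel([]io.Closer{
		noisyCloser("listener"),
		noisyCloser("conn-1"),
		noisyCloser("conn-2"),
	})
}
```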
+ + for l := range listeners { + go func(l transport.Listener) { + if err := l.Close(); err != nil { + log.Errorf("error when shutting down listener: %s", err) + } + }(l) + } + + for _, cs := range conns { + for _, c := range cs { + go func(c *Conn) { + if err := c.Close(); err != nil { + log.Errorf("error when shutting down connection: %s", err) + } + }(c) + } + } + + // Wait for everything to finish. + s.refs.Wait() + + // Now close out any transports (if necessary). Do this after closing + // all connections/listeners. + s.transports.Lock() + transports := s.transports.m + s.transports.m = nil + s.transports.Unlock() + + var wg sync.WaitGroup + for _, t := range transports { + if closer, ok := t.(io.Closer); ok { + wg.Add(1) + go func(c io.Closer) { + defer wg.Done() + if err := closer.Close(); err != nil { + log.Errorf("error when closing down transport %T: %s", c, err) + } + }(closer) + } + } + wg.Wait() + + return nil +} + +// Process returns the Process of the swarm +func (s *Swarm) Process() goprocess.Process { + return s.proc +} + +func (s *Swarm) addConn(tc transport.CapableConn, dir network.Direction) (*Conn, error) { + var ( + p = tc.RemotePeer() + addr = tc.RemoteMultiaddr() + ) + + // create the Stat object, initializing with the underlying connection Stat if available + var stat network.Stat + if cs, ok := tc.(network.ConnStat); ok { + stat = cs.Stat() + } + stat.Direction = dir + stat.Opened = time.Now() + + // Wrap and register the connection. + c := &Conn{ + conn: tc, + swarm: s, + stat: stat, + id: atomic.AddUint64(&s.nextConnID, 1), + } + + // we ONLY check upgraded connections here so we can send them a Disconnect message. + // If we do this in the Upgrader, we will not be able to do this. + if s.gater != nil { + if allow, _ := s.gater.InterceptUpgraded(c); !allow { + // TODO Send disconnect with reason here + err := tc.Close() + if err != nil { + log.Warnf("failed to close connection with peer %s and addr %s; err: %s", p.Pretty(), addr, err) + } + return nil, ErrGaterDisallowedConnection + } + } + + // Add the public key. + if pk := tc.RemotePublicKey(); pk != nil { + s.peers.AddPubKey(p, pk) + } + + // Clear any backoffs + s.backf.Clear(p) + + // Finally, add the peer. + s.conns.Lock() + // Check if we're still online + if s.conns.m == nil { + s.conns.Unlock() + tc.Close() + return nil, ErrSwarmClosed + } + + c.streams.m = make(map[*Stream]struct{}) + s.conns.m[p] = append(s.conns.m[p], c) + + // Add two swarm refs: + // * One will be decremented after the close notifications fire in Conn.doClose + // * The other will be decremented when Conn.start exits. + s.refs.Add(2) + + // Take the notification lock before releasing the conns lock to block + // Disconnect notifications until after the Connect notifications done. + c.notifyLk.Lock() + s.conns.Unlock() + + s.notifyAll(func(f network.Notifiee) { + f.Connected(s, c) + }) + c.notifyLk.Unlock() + + c.start() + + // TODO: Get rid of this. We use it for identify but that happen much + // earlier (really, inside the transport and, if not then, during the + // notifications). + if h := s.ConnHandler(); h != nil { + go h(c) + } + + return c, nil +} + +// Peerstore returns this swarms internal Peerstore. +func (s *Swarm) Peerstore() peerstore.Peerstore { + return s.peers +} + +// Context returns the context of the swarm +func (s *Swarm) Context() context.Context { + return s.ctx +} + +// Close stops the Swarm. +func (s *Swarm) Close() error { + return s.proc.Close() +} + +// TODO: We probably don't need the conn handlers. 
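`SetConnHandler`/`SetStreamHandler` below keep the handler in an `atomic.Value`, so the hot read path (`ConnHandler`/`StreamHandler`) never takes a lock. A minimal standalone sketch of that idiom:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type handler func(msg string)

type server struct {
	h atomic.Value // always stores a value of type handler
}

func (s *server) SetHandler(h handler) { s.h.Store(h) }

// Handler returns nil (without panicking) if no handler was ever set:
// the comma-ok type assertion absorbs the untyped nil from Load.
func (s *server) Handler() handler {
	h, _ := s.h.Load().(handler)
	return h
}

func main() {
	s := &server{}
	fmt.Println(s.Handler() == nil) // true: nothing stored yet
	s.SetHandler(func(msg string) { fmt.Println("got:", msg) })
	if h := s.Handler(); h != nil {
		h("hello")
	}
}
```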
+ +// SetConnHandler assigns the handler for new connections. +// You will rarely use this. See SetStreamHandler +func (s *Swarm) SetConnHandler(handler network.ConnHandler) { + s.connh.Store(handler) +} + +// ConnHandler gets the handler for new connections. +func (s *Swarm) ConnHandler() network.ConnHandler { + handler, _ := s.connh.Load().(network.ConnHandler) + return handler +} + +// SetStreamHandler assigns the handler for new streams. +func (s *Swarm) SetStreamHandler(handler network.StreamHandler) { + s.streamh.Store(handler) +} + +// StreamHandler gets the handler for new streams. +func (s *Swarm) StreamHandler() network.StreamHandler { + handler, _ := s.streamh.Load().(network.StreamHandler) + return handler +} + +// NewStream creates a new stream on any available connection to peer, dialing +// if necessary. +func (s *Swarm) NewStream(ctx context.Context, p peer.ID) (network.Stream, error) { + log.Debugf("[%s] opening stream to peer [%s]", s.local, p) + + // Algorithm: + // 1. Find the best connection, otherwise, dial. + // 2. Try opening a stream. + // 3. If the underlying connection is, in fact, closed, close the outer + // connection and try again. We do this in case we have a closed + // connection but don't notice it until we actually try to open a + // stream. + // + // Note: We only dial once. + // + // TODO: Try all connections even if we get an error opening a stream on + // a non-closed connection. + dials := 0 + for { + // will prefer direct connections over relayed connections for opening streams + c := s.bestConnToPeer(p) + if c == nil { + if nodial, _ := network.GetNoDial(ctx); nodial { + return nil, network.ErrNoConn + } + + if dials >= DialAttempts { + return nil, errors.New("max dial attempts exceeded") + } + dials++ + + var err error + c, err = s.dialPeer(ctx, p) + if err != nil { + return nil, err + } + } + + s, err := c.NewStream(ctx) + if err != nil { + if c.conn.IsClosed() { + continue + } + return nil, err + } + return s, nil + } +} + +// ConnsToPeer returns all the live connections to peer. +func (s *Swarm) ConnsToPeer(p peer.ID) []network.Conn { + // TODO: Consider sorting the connection list best to worst. Currently, + // it's sorted oldest to newest. + s.conns.RLock() + defer s.conns.RUnlock() + conns := s.conns.m[p] + output := make([]network.Conn, len(conns)) + for i, c := range conns { + output[i] = c + } + return output +} + +func isBetterConn(a, b *Conn) bool { + // If one is transient and not the other, prefer the non-transient connection. + aTransient := a.Stat().Transient + bTransient := b.Stat().Transient + if aTransient != bTransient { + return !aTransient + } + + // If one is direct and not the other, prefer the direct connection. + aDirect := isDirectConn(a) + bDirect := isDirectConn(b) + if aDirect != bDirect { + return aDirect + } + + // Otherwise, prefer the connection with more open streams. + a.streams.Lock() + aLen := len(a.streams.m) + a.streams.Unlock() + + b.streams.Lock() + bLen := len(b.streams.m) + b.streams.Unlock() + + if aLen != bLen { + return aLen > bLen + } + + // finally, pick the last connection. + return true +} + +// bestConnToPeer returns the best connection to peer. +func (s *Swarm) bestConnToPeer(p peer.ID) *Conn { + + // TODO: Prefer some transports over others. + // For now, prefers direct connections over Relayed connections. + // For tie-breaking, select the newest non-closed connection with the most streams. 
+ s.conns.RLock() + defer s.conns.RUnlock() + + var best *Conn + for _, c := range s.conns.m[p] { + if c.conn.IsClosed() { + // We *will* garbage collect this soon anyways. + continue + } + if best == nil || isBetterConn(c, best) { + best = c + } + } + return best +} + +func (s *Swarm) bestAcceptableConnToPeer(ctx context.Context, p peer.ID) *Conn { + conn := s.bestConnToPeer(p) + if conn != nil { + forceDirect, _ := network.GetForceDirectDial(ctx) + if !forceDirect || isDirectConn(conn) { + return conn + } + } + return nil +} + +func isDirectConn(c *Conn) bool { + return c != nil && !c.conn.Transport().Proxy() +} + +// Connectedness returns our "connectedness" state with the given peer. +// +// To check if we have an open connection, use `s.Connectedness(p) == +// network.Connected`. +func (s *Swarm) Connectedness(p peer.ID) network.Connectedness { + if s.bestConnToPeer(p) != nil { + return network.Connected + } + return network.NotConnected +} + +// Conns returns a slice of all connections. +func (s *Swarm) Conns() []network.Conn { + s.conns.RLock() + defer s.conns.RUnlock() + + conns := make([]network.Conn, 0, len(s.conns.m)) + for _, cs := range s.conns.m { + for _, c := range cs { + conns = append(conns, c) + } + } + return conns +} + +// ClosePeer closes all connections to the given peer. +func (s *Swarm) ClosePeer(p peer.ID) error { + conns := s.ConnsToPeer(p) + switch len(conns) { + case 0: + return nil + case 1: + return conns[0].Close() + default: + errCh := make(chan error) + for _, c := range conns { + go func(c network.Conn) { + errCh <- c.Close() + }(c) + } + + var errs []string + for range conns { + err := <-errCh + if err != nil { + errs = append(errs, err.Error()) + } + } + if len(errs) > 0 { + return fmt.Errorf("when disconnecting from peer %s: %s", p, strings.Join(errs, ", ")) + } + return nil + } +} + +// Peers returns a copy of the set of peers swarm is connected to. +func (s *Swarm) Peers() []peer.ID { + s.conns.RLock() + defer s.conns.RUnlock() + peers := make([]peer.ID, 0, len(s.conns.m)) + for p := range s.conns.m { + peers = append(peers, p) + } + + return peers +} + +// LocalPeer returns the local peer swarm is associated to. +func (s *Swarm) LocalPeer() peer.ID { + return s.local +} + +// Backoff returns the DialBackoff object for this swarm. +func (s *Swarm) Backoff() *DialBackoff { + return &s.backf +} + +// notifyAll sends a signal to all Notifiees +func (s *Swarm) notifyAll(notify func(network.Notifiee)) { + var wg sync.WaitGroup + + s.notifs.RLock() + wg.Add(len(s.notifs.m)) + for f := range s.notifs.m { + go func(f network.Notifiee) { + defer wg.Done() + notify(f) + }(f) + } + + wg.Wait() + s.notifs.RUnlock() +} + +// Notify signs up Notifiee to receive signals when events happen +func (s *Swarm) Notify(f network.Notifiee) { + s.notifs.Lock() + s.notifs.m[f] = struct{}{} + s.notifs.Unlock() +} + +// StopNotify unregisters Notifiee fromr receiving signals +func (s *Swarm) StopNotify(f network.Notifiee) { + s.notifs.Lock() + delete(s.notifs.m, f) + s.notifs.Unlock() +} + +func (s *Swarm) removeConn(c *Conn) { + p := c.RemotePeer() + + s.conns.Lock() + defer s.conns.Unlock() + cs := s.conns.m[p] + for i, ci := range cs { + if ci == c { + if len(cs) == 1 { + delete(s.conns.m, p) + } else { + // NOTE: We're intentionally preserving order. + // This way, connections to a peer are always + // sorted oldest to newest. 
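The removal implemented just below is the standard order-preserving slice delete. As a standalone sketch (nil-ing the vacated tail slot lets the GC reclaim the removed element):

```go
package main

import "fmt"

type conn struct{ id int }

// remove deletes cs[i] while keeping the relative order of the survivors.
func remove(cs []*conn, i int) []*conn {
	copy(cs[i:], cs[i+1:]) // shift the tail left over index i
	cs[len(cs)-1] = nil    // clear the dangling slot so the GC can collect it
	return cs[:len(cs)-1]
}

func main() {
	cs := []*conn{{0}, {1}, {2}, {3}}
	cs = remove(cs, 1)
	for _, c := range cs {
		fmt.Println(c.id) // 0, 2, 3: oldest-to-newest order preserved
	}
}
```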
+ copy(cs[i:], cs[i+1:]) + cs[len(cs)-1] = nil + s.conns.m[p] = cs[:len(cs)-1] + } + return + } + } +} + +// String returns a string representation of Network. +func (s *Swarm) String() string { + return fmt.Sprintf("", s.LocalPeer()) +} + +// Swarm is a Network. +var _ network.Network = (*Swarm)(nil) +var _ transport.TransportNetwork = (*Swarm)(nil) diff --git a/swarm_addr.go b/swarm_addr.go new file mode 100644 index 0000000000000000000000000000000000000000..88bc626bd277d2b696244ff78162d17a46901401 --- /dev/null +++ b/swarm_addr.go @@ -0,0 +1,72 @@ +package swarm + +import ( + "time" + + addrutil "github.com/libp2p/go-addr-util" + ma "github.com/multiformats/go-multiaddr" +) + +// ListenAddresses returns a list of addresses at which this swarm listens. +func (s *Swarm) ListenAddresses() []ma.Multiaddr { + s.listeners.RLock() + defer s.listeners.RUnlock() + return s.listenAddressesNoLock() +} + +func (s *Swarm) listenAddressesNoLock() []ma.Multiaddr { + addrs := make([]ma.Multiaddr, 0, len(s.listeners.m)) + for l := range s.listeners.m { + addrs = append(addrs, l.Multiaddr()) + } + return addrs +} + +const ifaceAddrsCacheDuration = 1 * time.Minute + +// InterfaceListenAddresses returns a list of addresses at which this swarm +// listens. It expands "any interface" addresses (/ip4/0.0.0.0, /ip6/::) to +// use the known local interfaces. +func (s *Swarm) InterfaceListenAddresses() ([]ma.Multiaddr, error) { + s.listeners.RLock() // RLock start + + ifaceListenAddres := s.listeners.ifaceListenAddres + isEOL := time.Now().After(s.listeners.cacheEOL) + s.listeners.RUnlock() // RLock end + + if !isEOL { + // Cache is valid, clone the slice + return append(ifaceListenAddres[:0:0], ifaceListenAddres...), nil + } + + // Cache is not valid + // Perfrom double checked locking + + s.listeners.Lock() // Lock start + + ifaceListenAddres = s.listeners.ifaceListenAddres + isEOL = time.Now().After(s.listeners.cacheEOL) + if isEOL { + // Cache is still invalid + listenAddres := s.listenAddressesNoLock() + if len(listenAddres) > 0 { + // We're actually listening on addresses. 
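`InterfaceListenAddresses` is a textbook double-checked cache: an optimistic read under `RLock`, a re-check under the write lock before recomputing, and the `s[:0:0]` clone trick so callers never share the cached slice. A generic standalone sketch of that pattern:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type cache struct {
	mu    sync.RWMutex
	value []string
	eol   time.Time
}

func (c *cache) get(compute func() []string, ttl time.Duration) []string {
	c.mu.RLock()
	v, fresh := c.value, time.Now().Before(c.eol)
	c.mu.RUnlock()
	if fresh {
		return append(v[:0:0], v...) // hand back a copy, never the cached slice
	}

	c.mu.Lock()
	defer c.mu.Unlock()
	// double check: another goroutine may have refreshed while we waited
	if !time.Now().Before(c.eol) {
		c.value = compute()
		c.eol = time.Now().Add(ttl)
	}
	return append(c.value[:0:0], c.value...)
}

func main() {
	c := &cache{}
	v := c.get(func() []string { return []string{"/ip4/127.0.0.1/tcp/4001"} }, time.Minute)
	fmt.Println(v)
}
```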
+ var err error + ifaceListenAddres, err = addrutil.ResolveUnspecifiedAddresses(listenAddres, nil) + + if err != nil { + s.listeners.Unlock() // Lock early exit + return nil, err + } + } else { + ifaceListenAddres = nil + } + + s.listeners.ifaceListenAddres = ifaceListenAddres + s.listeners.cacheEOL = time.Now().Add(ifaceAddrsCacheDuration) + } + + s.listeners.Unlock() // Lock end + + return append(ifaceListenAddres[:0:0], ifaceListenAddres...), nil +} diff --git a/swarm_addr_test.go b/swarm_addr_test.go new file mode 100644 index 0000000000000000000000000000000000000000..baeac46203aae9c67c26cdb28b07100ab3f3dad0 --- /dev/null +++ b/swarm_addr_test.go @@ -0,0 +1,73 @@ +package swarm_test + +import ( + "context" + "testing" + + "github.com/libp2p/go-libp2p-core/peerstore" + "github.com/libp2p/go-libp2p-core/test" + + ma "github.com/multiformats/go-multiaddr" + + swarmt "github.com/libp2p/go-libp2p-swarm/testing" +) + +func TestDialBadAddrs(t *testing.T) { + + m := func(s string) ma.Multiaddr { + maddr, err := ma.NewMultiaddr(s) + if err != nil { + t.Fatal(err) + } + return maddr + } + + ctx := context.Background() + s := makeSwarms(ctx, t, 1)[0] + + test := func(a ma.Multiaddr) { + p := test.RandPeerIDFatal(t) + s.Peerstore().AddAddr(p, a, peerstore.PermanentAddrTTL) + if _, err := s.DialPeer(ctx, p); err == nil { + t.Errorf("swarm should not dial: %s", p) + } + } + + test(m("/ip6/fe80::1")) // link local + test(m("/ip6/fe80::100")) // link local + test(m("/ip4/127.0.0.1/udp/1234/utp")) // utp +} + +func TestAddrRace(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + s := makeSwarms(ctx, t, 1)[0] + defer s.Close() + + a1, err := s.InterfaceListenAddresses() + if err != nil { + t.Fatal(err) + } + a2, err := s.InterfaceListenAddresses() + if err != nil { + t.Fatal(err) + } + + if len(a1) > 0 && len(a2) > 0 && &a1[0] == &a2[0] { + t.Fatal("got the exact same address set twice; this could lead to data races") + } +} + +func TestAddressesWithoutListening(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + s := swarmt.GenSwarm(t, ctx, swarmt.OptDialOnly) + + a1, err := s.InterfaceListenAddresses() + if err != nil { + t.Fatal(err) + } + if len(a1) != 0 { + t.Fatalf("expected to be listening on no addresses, was listening on %d", len(a1)) + } +} diff --git a/swarm_conn.go b/swarm_conn.go new file mode 100644 index 0000000000000000000000000000000000000000..3d5f8f3735171282d7b13b506b680cc899c2d027 --- /dev/null +++ b/swarm_conn.go @@ -0,0 +1,243 @@ +package swarm + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + ic "github.com/libp2p/go-libp2p-core/crypto" + "github.com/libp2p/go-libp2p-core/mux" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/transport" + + ma "github.com/multiformats/go-multiaddr" +) + +// TODO: Put this elsewhere. + +// ErrConnClosed is returned when operating on a closed connection. +var ErrConnClosed = errors.New("connection closed") + +// Conn is the connection type used by swarm. In general, you won't use this +// type directly. 
+type Conn struct { + id uint64 + conn transport.CapableConn + swarm *Swarm + + closeOnce sync.Once + err error + + notifyLk sync.Mutex + + streams struct { + sync.Mutex + m map[*Stream]struct{} + } + + stat network.Stat +} + +func (c *Conn) ID() string { + // format: - + return fmt.Sprintf("%s-%d", c.RemotePeer().Pretty()[0:10], c.id) +} + +// Close closes this connection. +// +// Note: This method won't wait for the close notifications to finish as that +// would create a deadlock when called from an open notification (because all +// open notifications must finish before we can fire off the close +// notifications). +func (c *Conn) Close() error { + c.closeOnce.Do(c.doClose) + return c.err +} + +func (c *Conn) doClose() { + c.swarm.removeConn(c) + + // Prevent new streams from opening. + c.streams.Lock() + streams := c.streams.m + c.streams.m = nil + c.streams.Unlock() + + c.err = c.conn.Close() + + // This is just for cleaning up state. The connection has already been closed. + // We *could* optimize this but it really isn't worth it. + for s := range streams { + s.Reset() + } + + // do this in a goroutine to avoid deadlocking if we call close in an open notification. + go func() { + // prevents us from issuing close notifications before finishing the open notifications + c.notifyLk.Lock() + defer c.notifyLk.Unlock() + + c.swarm.notifyAll(func(f network.Notifiee) { + f.Disconnected(c.swarm, c) + }) + c.swarm.refs.Done() // taken in Swarm.addConn + }() +} + +func (c *Conn) removeStream(s *Stream) { + c.streams.Lock() + delete(c.streams.m, s) + c.streams.Unlock() +} + +// listens for new streams. +// +// The caller must take a swarm ref before calling. This function decrements the +// swarm ref count. +func (c *Conn) start() { + go func() { + defer c.swarm.refs.Done() + defer c.Close() + + for { + ts, err := c.conn.AcceptStream() + if err != nil { + return + } + c.swarm.refs.Add(1) + go func() { + s, err := c.addStream(ts, network.DirInbound) + + // Don't defer this. We don't want to block + // swarm shutdown on the connection handler. + c.swarm.refs.Done() + + // We only get an error here when the swarm is closed or closing. 
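This accept loop pairs itself, and every per-stream goroutine it spawns, with a swarm ref so teardown can wait for stragglers. A stripped-down sketch of that loop shape:

```go
package main

import (
	"fmt"
	"sync"
)

type listener struct{ ch chan int }

func (l *listener) Accept() (int, bool) { v, ok := <-l.ch; return v, ok }

func main() {
	var refs sync.WaitGroup
	l := &listener{ch: make(chan int, 3)}
	for i := 0; i < 3; i++ {
		l.ch <- i
	}
	close(l.ch)

	refs.Add(1) // ref held by the accept loop itself
	go func() {
		defer refs.Done()
		for {
			s, ok := l.Accept()
			if !ok {
				return // listener closed: the loop's own ref is released
			}
			refs.Add(1) // one ref per in-flight stream handler
			go func(s int) {
				defer refs.Done()
				fmt.Println("handled stream", s)
			}(s)
		}
	}()

	refs.Wait() // teardown: returns only after the loop and all handlers exit
}
```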
+ if err != nil { + return + } + + if h := c.swarm.StreamHandler(); h != nil { + h(s) + } + }() + } + }() +} + +func (c *Conn) String() string { + return fmt.Sprintf( + " %s (%s)>", + c.conn.Transport(), + c.conn.LocalMultiaddr(), + c.conn.LocalPeer().Pretty(), + c.conn.RemoteMultiaddr(), + c.conn.RemotePeer().Pretty(), + ) +} + +// LocalMultiaddr is the Multiaddr on this side +func (c *Conn) LocalMultiaddr() ma.Multiaddr { + return c.conn.LocalMultiaddr() +} + +// LocalPeer is the Peer on our side of the connection +func (c *Conn) LocalPeer() peer.ID { + return c.conn.LocalPeer() +} + +// RemoteMultiaddr is the Multiaddr on the remote side +func (c *Conn) RemoteMultiaddr() ma.Multiaddr { + return c.conn.RemoteMultiaddr() +} + +// RemotePeer is the Peer on the remote side +func (c *Conn) RemotePeer() peer.ID { + return c.conn.RemotePeer() +} + +// LocalPrivateKey is the public key of the peer on this side +func (c *Conn) LocalPrivateKey() ic.PrivKey { + return c.conn.LocalPrivateKey() +} + +// RemotePublicKey is the public key of the peer on the remote side +func (c *Conn) RemotePublicKey() ic.PubKey { + return c.conn.RemotePublicKey() +} + +// Stat returns metadata pertaining to this connection +func (c *Conn) Stat() network.Stat { + return c.stat +} + +// NewStream returns a new Stream from this connection +func (c *Conn) NewStream(ctx context.Context) (network.Stream, error) { + if c.Stat().Transient { + if useTransient, _ := network.GetUseTransient(ctx); !useTransient { + return nil, network.ErrTransientConn + } + } + + ts, err := c.conn.OpenStream(ctx) + + if err != nil { + return nil, err + } + return c.addStream(ts, network.DirOutbound) +} + +func (c *Conn) addStream(ts mux.MuxedStream, dir network.Direction) (*Stream, error) { + c.streams.Lock() + // Are we still online? + if c.streams.m == nil { + c.streams.Unlock() + ts.Reset() + return nil, ErrConnClosed + } + + // Wrap and register the stream. + stat := network.Stat{ + Direction: dir, + Opened: time.Now(), + } + s := &Stream{ + stream: ts, + conn: c, + stat: stat, + id: atomic.AddUint64(&c.swarm.nextStreamID, 1), + } + c.streams.m[s] = struct{}{} + + // Released once the stream disconnect notifications have finished + // firing (in Swarm.remove). + c.swarm.refs.Add(1) + + // Take the notification lock before releasing the streams lock to block + // StreamClose notifications until after the StreamOpen notifications + // done. + s.notifyLk.Lock() + c.streams.Unlock() + + c.swarm.notifyAll(func(f network.Notifiee) { + f.OpenedStream(c.swarm, s) + }) + s.notifyLk.Unlock() + + return s, nil +} + +// GetStreams returns the streams associated with this connection. +func (c *Conn) GetStreams() []network.Stream { + c.streams.Lock() + defer c.streams.Unlock() + streams := make([]network.Stream, 0, len(c.streams.m)) + for s := range c.streams.m { + streams = append(streams, s) + } + return streams +} diff --git a/swarm_dial.go b/swarm_dial.go new file mode 100644 index 0000000000000000000000000000000000000000..e130c18301a0d2015c27e462efefba50dc86c42c --- /dev/null +++ b/swarm_dial.go @@ -0,0 +1,752 @@ +package swarm + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/transport" + + addrutil "github.com/libp2p/go-addr-util" + ma "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" +) + +// Diagram of dial sync: +// +// many callers of Dial() synched w. 
dials many addrs results to callers +// ----------------------\ dialsync use earliest /-------------- +// -----------------------\ |----------\ /---------------- +// ------------------------>------------<------- >---------<----------------- +// -----------------------| \----x \---------------- +// ----------------------| \-----x \--------------- +// any may fail if no addr at end +// retry dialAttempt x + +var ( + // ErrDialBackoff is returned by the backoff code when a given peer has + // been dialed too frequently + ErrDialBackoff = errors.New("dial backoff") + + // ErrDialToSelf is returned if we attempt to dial our own peer + ErrDialToSelf = errors.New("dial to self attempted") + + // ErrNoTransport is returned when we don't know a transport for the + // given multiaddr. + ErrNoTransport = errors.New("no transport for protocol") + + // ErrAllDialsFailed is returned when connecting to a peer has ultimately failed + ErrAllDialsFailed = errors.New("all dials failed") + + // ErrNoAddresses is returned when we fail to find any addresses for a + // peer we're trying to dial. + ErrNoAddresses = errors.New("no addresses") + + // ErrNoGoodAddresses is returned when we find addresses for a peer but + // can't use any of them. + ErrNoGoodAddresses = errors.New("no good addresses") + + // ErrGaterDisallowedConnection is returned when the gater prevents us from + // forming a connection with a peer. + ErrGaterDisallowedConnection = errors.New("gater disallows connection to peer") +) + +// DialAttempts governs how many times a goroutine will try to dial a given peer. +// Note: this is down to one, as we have _too many dials_ atm. To add back in, +// add loop back in Dial(.) +const DialAttempts = 1 + +// ConcurrentFdDials is the number of concurrent outbound dials over transports +// that consume file descriptors +const ConcurrentFdDials = 160 + +// DefaultPerPeerRateLimit is the number of concurrent outbound dials to make +// per peer +const DefaultPerPeerRateLimit = 8 + +// dialbackoff is a struct used to avoid over-dialing the same, dead peers. +// Whenever we totally time out on a peer (all three attempts), we add them +// to dialbackoff. Then, whenevers goroutines would _wait_ (dialsync), they +// check dialbackoff. If it's there, they don't wait and exit promptly with +// an error. (the single goroutine that is actually dialing continues to +// dial). If a dial is successful, the peer is removed from backoff. +// Example: +// +// for { +// if ok, wait := dialsync.Lock(p); !ok { +// if backoff.Backoff(p) { +// return errDialFailed +// } +// <-wait +// continue +// } +// defer dialsync.Unlock(p) +// c, err := actuallyDial(p) +// if err != nil { +// dialbackoff.AddBackoff(p) +// continue +// } +// dialbackoff.Clear(p) +// } +// + +// DialBackoff is a type for tracking peer dial backoffs. +// +// * It's safe to use its zero value. +// * It's thread-safe. +// * It's *not* safe to move this type after using. 
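The schedule `AddBackoff` (below) implements is quadratic in the number of prior failures, with a hard cap. A standalone sketch, assuming the package defaults (`BackoffBase` = 5s, `BackoffCoef` = 1s, `BackoffMax` = 5m):

```go
package main

import (
	"fmt"
	"time"
)

// backoffDuration computes BackoffBase + BackoffCoef*priorBackoffs^2,
// capped at BackoffMax, using the package's documented defaults.
func backoffDuration(priorBackoffs int) time.Duration {
	base := 5 * time.Second
	coef := time.Second
	max := 5 * time.Minute
	d := base + coef*time.Duration(priorBackoffs*priorBackoffs)
	if d > max {
		d = max
	}
	return d
}

func main() {
	for tries := 0; tries <= 20; tries += 5 {
		fmt.Printf("after %2d backoffs: wait %v\n", tries, backoffDuration(tries))
	}
	// after 20 backoffs the raw value (405s) exceeds the cap, so 5m is used
}
```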
+type DialBackoff struct { + entries map[peer.ID]map[string]*backoffAddr + lock sync.RWMutex +} + +type backoffAddr struct { + tries int + until time.Time +} + +func (db *DialBackoff) init(ctx context.Context) { + if db.entries == nil { + db.entries = make(map[peer.ID]map[string]*backoffAddr) + } + go db.background(ctx) +} + +func (db *DialBackoff) background(ctx context.Context) { + ticker := time.NewTicker(BackoffMax) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + db.cleanup() + } + } +} + +// Backoff returns whether the client should backoff from dialing +// peer p at address addr +func (db *DialBackoff) Backoff(p peer.ID, addr ma.Multiaddr) (backoff bool) { + db.lock.Lock() + defer db.lock.Unlock() + + ap, found := db.entries[p][string(addr.Bytes())] + return found && time.Now().Before(ap.until) +} + +// BackoffBase is the base amount of time to backoff (default: 5s). +var BackoffBase = time.Second * 5 + +// BackoffCoef is the backoff coefficient (default: 1s). +var BackoffCoef = time.Second + +// BackoffMax is the maximum backoff time (default: 5m). +var BackoffMax = time.Minute * 5 + +// AddBackoff lets other nodes know that we've entered backoff with +// peer p, so dialers should not wait unnecessarily. We still will +// attempt to dial with one goroutine, in case we get through. +// +// Backoff is not exponential, it's quadratic and computed according to the +// following formula: +// +// BackoffBase + BakoffCoef * PriorBackoffs^2 +// +// Where PriorBackoffs is the number of previous backoffs. +func (db *DialBackoff) AddBackoff(p peer.ID, addr ma.Multiaddr) { + saddr := string(addr.Bytes()) + db.lock.Lock() + defer db.lock.Unlock() + bp, ok := db.entries[p] + if !ok { + bp = make(map[string]*backoffAddr, 1) + db.entries[p] = bp + } + ba, ok := bp[saddr] + if !ok { + bp[saddr] = &backoffAddr{ + tries: 1, + until: time.Now().Add(BackoffBase), + } + return + } + + backoffTime := BackoffBase + BackoffCoef*time.Duration(ba.tries*ba.tries) + if backoffTime > BackoffMax { + backoffTime = BackoffMax + } + ba.until = time.Now().Add(backoffTime) + ba.tries++ +} + +// Clear removes a backoff record. Clients should call this after a +// successful Dial. +func (db *DialBackoff) Clear(p peer.ID) { + db.lock.Lock() + defer db.lock.Unlock() + delete(db.entries, p) +} + +func (db *DialBackoff) cleanup() { + db.lock.Lock() + defer db.lock.Unlock() + now := time.Now() + for p, e := range db.entries { + good := false + for _, backoff := range e { + backoffTime := BackoffBase + BackoffCoef*time.Duration(backoff.tries*backoff.tries) + if backoffTime > BackoffMax { + backoffTime = BackoffMax + } + if now.Before(backoff.until.Add(backoffTime)) { + good = true + break + } + } + if !good { + delete(db.entries, p) + } + } +} + +// DialPeer connects to a peer. +// +// The idea is that the client of Swarm does not need to know what network +// the connection will happen over. Swarm can use whichever it choses. +// This allows us to use various transport protocols, do NAT traversal/relay, +// etc. to achieve connection. +func (s *Swarm) DialPeer(ctx context.Context, p peer.ID) (network.Conn, error) { + if s.gater != nil && !s.gater.InterceptPeerDial(p) { + log.Debugf("gater disallowed outbound connection to peer %s", p.Pretty()) + return nil, &DialError{Peer: p, Cause: ErrGaterDisallowedConnection} + } + + // Avoid typed nil issues. 
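"Avoid typed nil issues" refers to a classic Go gotcha: a nil `*Conn` stored in a non-empty interface value compares non-nil, which is why `DialPeer` re-checks `err` rather than returning the `*Conn` unconditionally. A minimal demonstration:

```go
package main

import "fmt"

type conn struct{}

func (c *conn) Close() error { return nil }

type netConn interface{ Close() error }

func dial(fail bool) (*conn, error) {
	if fail {
		return nil, fmt.Errorf("dial failed")
	}
	return &conn{}, nil
}

func main() {
	inner, err := dial(true)
	var c netConn = inner     // wraps a typed nil *conn
	fmt.Println(err != nil)   // true
	fmt.Println(inner == nil) // true
	fmt.Println(c == nil)     // false! the interface carries a concrete type
}
```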
+ c, err := s.dialPeer(ctx, p) + if err != nil { + return nil, err + } + return c, nil +} + +// internal dial method that returns an unwrapped conn +// +// It is gated by the swarm's dial synchronization systems: dialsync and +// dialbackoff. +func (s *Swarm) dialPeer(ctx context.Context, p peer.ID) (*Conn, error) { + log.Debugw("dialing peer", "from", s.local, "to", p) + err := p.Validate() + if err != nil { + return nil, err + } + + if p == s.local { + return nil, ErrDialToSelf + } + + // check if we already have an open (usable) connection first + conn := s.bestAcceptableConnToPeer(ctx, p) + if conn != nil { + return conn, nil + } + + // apply the DialPeer timeout + ctx, cancel := context.WithTimeout(ctx, network.GetDialPeerTimeout(ctx)) + defer cancel() + + conn, err = s.dsync.DialLock(ctx, p) + if err == nil { + return conn, nil + } + + log.Debugf("network for %s finished dialing %s", s.local, p) + + if ctx.Err() != nil { + // Context error trumps any dial errors as it was likely the ultimate cause. + return nil, ctx.Err() + } + + if s.ctx.Err() != nil { + // Ok, so the swarm is shutting down. + return nil, ErrSwarmClosed + } + + return nil, err +} + +/////////////////////////////////////////////////////////////////////////////////// +// lo and behold, The Dialer +// TODO explain how all this works +////////////////////////////////////////////////////////////////////////////////// + +type dialRequest struct { + ctx context.Context + resch chan dialResponse +} + +type dialResponse struct { + conn *Conn + err error +} + +// startDialWorker starts an active dial goroutine that synchronizes and executes concurrent dials +func (s *Swarm) startDialWorker(ctx context.Context, p peer.ID, reqch <-chan dialRequest) error { + if p == s.local { + return ErrDialToSelf + } + + go s.dialWorkerLoop(ctx, p, reqch) + return nil +} + +func (s *Swarm) dialWorkerLoop(ctx context.Context, p peer.ID, reqch <-chan dialRequest) { + defer s.limiter.clearAllPeerDials(p) + + type pendRequest struct { + req dialRequest // the original request + err *DialError // dial error accumulator + addrs map[ma.Multiaddr]struct{} // pending addr dials + } + + type addrDial struct { + addr ma.Multiaddr + ctx context.Context + conn *Conn + err error + requests []int + dialed bool + } + + reqno := 0 + requests := make(map[int]*pendRequest) + pending := make(map[ma.Multiaddr]*addrDial) + + dispatchError := func(ad *addrDial, err error) { + ad.err = err + for _, reqno := range ad.requests { + pr, ok := requests[reqno] + if !ok { + // has already been dispatched + continue + } + + // accumulate the error + pr.err.recordErr(ad.addr, err) + + delete(pr.addrs, ad.addr) + if len(pr.addrs) == 0 { + // all addrs have erred, dispatch dial error + // but first do a last one check in case an acceptable connection has landed from + // a simultaneous dial that started later and added new acceptable addrs + c := s.bestAcceptableConnToPeer(pr.req.ctx, p) + if c != nil { + pr.req.resch <- dialResponse{conn: c} + } else { + pr.req.resch <- dialResponse{err: pr.err} + } + delete(requests, reqno) + } + } + + ad.requests = nil + + // if it was a backoff, clear the address dial so that it doesn't inhibit new dial requests. + // this is necessary to support active listen scenarios, where a new dial comes in while + // another dial is in progress, and needs to do a direct connection without inhibitions from + // dial backoff. + // it is also necessary to preserve consisent behaviour with the old dialer -- TestDialBackoff + // regresses without this. 
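`dispatchError` (and the success path later in the loop) fans one per-address outcome out to every request subscribed to that address, completing a request only when all of its addresses have failed. A simplified standalone sketch of that bookkeeping; all names here are illustrative:

```go
package main

import "fmt"

type result struct {
	conn string // stand-in for an established connection
	err  error
}

type pendRequest struct {
	resch chan result
	addrs map[string]struct{} // addresses this request is still waiting on
}

// dispatch delivers the outcome of one address dial to every pending
// request that subscribed to that address.
func dispatch(requests map[int]*pendRequest, addr string, res result) {
	for id, pr := range requests {
		if _, ok := pr.addrs[addr]; !ok {
			continue
		}
		if res.err == nil {
			pr.resch <- res // success completes the request immediately
			delete(requests, id)
			continue
		}
		delete(pr.addrs, addr) // accumulate the failure
		if len(pr.addrs) == 0 {
			pr.resch <- res // every address failed: report the error
			delete(requests, id)
		}
	}
}

func main() {
	pr := &pendRequest{
		resch: make(chan result, 1),
		addrs: map[string]struct{}{"/ip4/1.2.3.4/tcp/1": {}},
	}
	dispatch(map[int]*pendRequest{7: pr}, "/ip4/1.2.3.4/tcp/1",
		result{err: fmt.Errorf("connection refused")})
	fmt.Println(<-pr.resch)
}
```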
+ if err == ErrDialBackoff { + delete(pending, ad.addr) + } + } + + var triggerDial <-chan struct{} + triggerNow := make(chan struct{}) + close(triggerNow) + + var nextDial []ma.Multiaddr + active := 0 + done := false // true when the request channel has been closed + connected := false // true when a connection has been successfully established + + resch := make(chan dialResult) + +loop: + for { + select { + case req, ok := <-reqch: + if !ok { + // request channel has been closed, wait for pending dials to complete + if active > 0 { + done = true + reqch = nil + triggerDial = nil + continue loop + } + + // no active dials, we are done + return + } + + c := s.bestAcceptableConnToPeer(req.ctx, p) + if c != nil { + req.resch <- dialResponse{conn: c} + continue loop + } + + addrs, err := s.addrsForDial(req.ctx, p) + if err != nil { + req.resch <- dialResponse{err: err} + continue loop + } + + // at this point, len(addrs) > 0 or else it would be error from addrsForDial + // ranke them to process in order + addrs = s.rankAddrs(addrs) + + // create the pending request object + pr := &pendRequest{ + req: req, + err: &DialError{Peer: p}, + addrs: make(map[ma.Multiaddr]struct{}), + } + for _, a := range addrs { + pr.addrs[a] = struct{}{} + } + + // check if any of the addrs has been successfully dialed and accumulate + // errors from complete dials while collecting new addrs to dial/join + var todial []ma.Multiaddr + var tojoin []*addrDial + + for _, a := range addrs { + ad, ok := pending[a] + if !ok { + todial = append(todial, a) + continue + } + + if ad.conn != nil { + // dial to this addr was successful, complete the request + req.resch <- dialResponse{conn: ad.conn} + continue loop + } + + if ad.err != nil { + // dial to this addr errored, accumulate the error + pr.err.recordErr(a, ad.err) + delete(pr.addrs, a) + continue + } + + // dial is still pending, add to the join list + tojoin = append(tojoin, ad) + } + + if len(todial) == 0 && len(tojoin) == 0 { + // all request applicable addrs have been dialed, we must have errored + req.resch <- dialResponse{err: pr.err} + continue loop + } + + // the request has some pending or new dials, track it and schedule new dials + reqno++ + requests[reqno] = pr + + for _, ad := range tojoin { + if !ad.dialed { + ad.ctx = s.mergeDialContexts(ad.ctx, req.ctx) + } + ad.requests = append(ad.requests, reqno) + } + + if len(todial) > 0 { + for _, a := range todial { + pending[a] = &addrDial{addr: a, ctx: req.ctx, requests: []int{reqno}} + } + + nextDial = append(nextDial, todial...) + nextDial = s.rankAddrs(nextDial) + + // trigger a new dial now to account for the new addrs we added + triggerDial = triggerNow + } + + case <-triggerDial: + for _, addr := range nextDial { + // spawn the dial + ad := pending[addr] + err := s.dialNextAddr(ad.ctx, p, addr, resch) + if err != nil { + dispatchError(ad, err) + } + } + + nextDial = nil + triggerDial = nil + + case res := <-resch: + active-- + + if res.Conn != nil { + connected = true + } + + if done && active == 0 { + if res.Conn != nil { + // we got an actual connection, but the dial has been cancelled + // Should we close it? 
I think not, we should just add it to the swarm + _, err := s.addConn(res.Conn, network.DirOutbound) + if err != nil { + // well duh, now we have to close it + res.Conn.Close() + } + } + return + } + + ad := pending[res.Addr] + + if res.Conn != nil { + // we got a connection, add it to the swarm + conn, err := s.addConn(res.Conn, network.DirOutbound) + if err != nil { + // oops no, we failed to add it to the swarm + res.Conn.Close() + dispatchError(ad, err) + if active == 0 && len(nextDial) > 0 { + triggerDial = triggerNow + } + continue loop + } + + // dispatch to still pending requests + for _, reqno := range ad.requests { + pr, ok := requests[reqno] + if !ok { + // it has already dispatched a connection + continue + } + + pr.req.resch <- dialResponse{conn: conn} + delete(requests, reqno) + } + + ad.conn = conn + ad.requests = nil + + continue loop + } + + // it must be an error -- add backoff if applicable and dispatch + if res.Err != context.Canceled && !connected { + // we only add backoff if there has not been a successful connection + // for consistency with the old dialer behavior. + s.backf.AddBackoff(p, res.Addr) + } + + dispatchError(ad, res.Err) + if active == 0 && len(nextDial) > 0 { + triggerDial = triggerNow + } + } + } +} + +func (s *Swarm) addrsForDial(ctx context.Context, p peer.ID) ([]ma.Multiaddr, error) { + peerAddrs := s.peers.Addrs(p) + if len(peerAddrs) == 0 { + return nil, ErrNoAddresses + } + + goodAddrs := s.filterKnownUndialables(p, peerAddrs) + if forceDirect, _ := network.GetForceDirectDial(ctx); forceDirect { + goodAddrs = addrutil.FilterAddrs(goodAddrs, s.nonProxyAddr) + } + + if len(goodAddrs) == 0 { + return nil, ErrNoGoodAddresses + } + + return goodAddrs, nil +} + +func (s *Swarm) mergeDialContexts(a, b context.Context) context.Context { + dialCtx := a + + if simConnect, reason := network.GetSimultaneousConnect(b); simConnect { + if simConnect, _ := network.GetSimultaneousConnect(a); !simConnect { + dialCtx = network.WithSimultaneousConnect(dialCtx, reason) + } + } + + return dialCtx +} + +func (s *Swarm) dialNextAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr, resch chan dialResult) error { + // check the dial backoff + if forceDirect, _ := network.GetForceDirectDial(ctx); !forceDirect { + if s.backf.Backoff(p, addr) { + return ErrDialBackoff + } + } + + // start the dial + s.limitedDial(ctx, p, addr, resch) + + return nil +} + +func (s *Swarm) canDial(addr ma.Multiaddr) bool { + t := s.TransportForDialing(addr) + return t != nil && t.CanDial(addr) +} + +func (s *Swarm) nonProxyAddr(addr ma.Multiaddr) bool { + t := s.TransportForDialing(addr) + return !t.Proxy() +} + +// ranks addresses in descending order of preference for dialing, with the following rules: +// NonRelay > Relay +// NonWS > WS +// Private > Public +// UDP > TCP +func (s *Swarm) rankAddrs(addrs []ma.Multiaddr) []ma.Multiaddr { + addrTier := func(a ma.Multiaddr) (tier int) { + if isRelayAddr(a) { + tier |= 0b1000 + } + if isExpensiveAddr(a) { + tier |= 0b0100 + } + if !manet.IsPrivateAddr(a) { + tier |= 0b0010 + } + if isFdConsumingAddr(a) { + tier |= 0b0001 + } + + return tier + } + + tiers := make([][]ma.Multiaddr, 16) + for _, a := range addrs { + tier := addrTier(a) + tiers[tier] = append(tiers[tier], a) + } + + result := make([]ma.Multiaddr, 0, len(addrs)) + for _, tier := range tiers { + result = append(result, tier...) 
+ } + + return result +} + +// filterKnownUndialables takes a list of multiaddrs, and removes those +// that we definitely don't want to dial: addresses configured to be blocked, +// IPv6 link-local addresses, addresses without a dial-capable transport, +// and addresses that we know to be our own. +// This is an optimization to avoid wasting time on dials that we know are going to fail. +func (s *Swarm) filterKnownUndialables(p peer.ID, addrs []ma.Multiaddr) []ma.Multiaddr { + lisAddrs, _ := s.InterfaceListenAddresses() + var ourAddrs []ma.Multiaddr + for _, addr := range lisAddrs { + protos := addr.Protocols() + // we're only sure about filtering out /ip4 and /ip6 addresses, so far + if protos[0].Code == ma.P_IP4 || protos[0].Code == ma.P_IP6 { + ourAddrs = append(ourAddrs, addr) + } + } + + return addrutil.FilterAddrs(addrs, + addrutil.SubtractFilter(ourAddrs...), + s.canDial, + // TODO: Consider allowing link-local addresses + addrutil.AddrOverNonLocalIP, + func(addr ma.Multiaddr) bool { + return s.gater == nil || s.gater.InterceptAddrDial(p, addr) + }, + ) +} + +// limitedDial will start a dial to the given peer when +// it is able, respecting the various different types of rate +// limiting that occur without using extra goroutines per addr +func (s *Swarm) limitedDial(ctx context.Context, p peer.ID, a ma.Multiaddr, resp chan dialResult) { + s.limiter.AddDialJob(&dialJob{ + addr: a, + peer: p, + resp: resp, + ctx: ctx, + }) +} + +// dialAddr is the actual dial for an addr, indirectly invoked through the limiter +func (s *Swarm) dialAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr) (transport.CapableConn, error) { + // Just to double check. Costs nothing. + if s.local == p { + return nil, ErrDialToSelf + } + log.Debugf("%s swarm dialing %s %s", s.local, p, addr) + + tpt := s.TransportForDialing(addr) + if tpt == nil { + return nil, ErrNoTransport + } + + connC, err := tpt.Dial(ctx, addr, p) + if err != nil { + return nil, err + } + + // Trust the transport? Yeah... right. + if connC.RemotePeer() != p { + connC.Close() + err = fmt.Errorf("BUG in transport %T: tried to dial %s, dialed %s", p, connC.RemotePeer(), tpt) + log.Error(err) + return nil, err + } + + // success! we got one! + return connC, nil +} + +// TODO We should have a `IsFdConsuming() bool` method on the `Transport` interface in go-libp2p-core/transport. +// This function checks if any of the transport protocols in the address requires a file descriptor. +// For now: +// A Non-circuit address which has the TCP/UNIX protocol is deemed FD consuming. +// For a circuit-relay address, we look at the address of the relay server/proxy +// and use the same logic as above to decide. 
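A standalone sketch mirroring the rule just described (TCP or UNIX ahead of any `/p2p-circuit` component consumes a file descriptor); the helper name is illustrative:

```go
package main

import (
	"fmt"

	ma "github.com/multiformats/go-multiaddr"
)

// fdConsuming is an illustrative reimplementation of the rule above.
func fdConsuming(addr ma.Multiaddr) bool {
	// Keep only the part up to the first /p2p-circuit component; for a
	// relay address this is the relay server's own address.
	first, _ := ma.SplitFunc(addr, func(c ma.Component) bool {
		return c.Protocol().Code == ma.P_CIRCUIT
	})
	if first == nil {
		return true // be conservative
	}
	_, tcpErr := first.ValueForProtocol(ma.P_TCP)
	_, unixErr := first.ValueForProtocol(ma.P_UNIX)
	return tcpErr == nil || unixErr == nil
}

func main() {
	for _, s := range []string{
		"/ip4/1.2.3.4/tcp/4001",      // true: plain TCP needs an fd
		"/ip4/1.2.3.4/udp/4001/quic", // false: UDP-based transport
	} {
		a, err := ma.NewMultiaddr(s)
		if err != nil {
			panic(err)
		}
		fmt.Println(s, "->", fdConsuming(a))
	}
}
```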
+func isFdConsumingAddr(addr ma.Multiaddr) bool { + first, _ := ma.SplitFunc(addr, func(c ma.Component) bool { + return c.Protocol().Code == ma.P_CIRCUIT + }) + + // for safety + if first == nil { + return true + } + + _, err1 := first.ValueForProtocol(ma.P_TCP) + _, err2 := first.ValueForProtocol(ma.P_UNIX) + return err1 == nil || err2 == nil +} + +func isExpensiveAddr(addr ma.Multiaddr) bool { + _, err1 := addr.ValueForProtocol(ma.P_WS) + _, err2 := addr.ValueForProtocol(ma.P_WSS) + return err1 == nil || err2 == nil +} + +func isRelayAddr(addr ma.Multiaddr) bool { + _, err := addr.ValueForProtocol(ma.P_CIRCUIT) + return err == nil +} diff --git a/swarm_listen.go b/swarm_listen.go new file mode 100644 index 0000000000000000000000000000000000000000..c064ae851e7a7f38dd94fee79d58541aed81a20b --- /dev/null +++ b/swarm_listen.go @@ -0,0 +1,121 @@ +package swarm + +import ( + "fmt" + "time" + + "github.com/libp2p/go-libp2p-core/network" + + ma "github.com/multiformats/go-multiaddr" +) + +// Listen sets up listeners for all of the given addresses. +// It returns as long as we successfully listen on at least *one* address. +func (s *Swarm) Listen(addrs ...ma.Multiaddr) error { + errs := make([]error, len(addrs)) + var succeeded int + for i, a := range addrs { + if err := s.AddListenAddr(a); err != nil { + errs[i] = err + } else { + succeeded++ + } + } + + for i, e := range errs { + if e != nil { + log.Warnw("listening failed", "on", addrs[i], "error", errs[i]) + } + } + + if succeeded == 0 && len(addrs) > 0 { + return fmt.Errorf("failed to listen on any addresses: %s", errs) + } + + return nil +} + +// AddListenAddr tells the swarm to listen on a single address. Unlike Listen, +// this method does not attempt to filter out bad addresses. +func (s *Swarm) AddListenAddr(a ma.Multiaddr) error { + tpt := s.TransportForListening(a) + if tpt == nil { + // TransportForListening will return nil if either: + // 1. No transport has been registered. + // 2. We're closed (so we've nulled out the transport map. + // + // Distinguish between these two cases to avoid confusing users. + select { + case <-s.proc.Closing(): + return ErrSwarmClosed + default: + return ErrNoTransport + } + } + + list, err := tpt.Listen(a) + if err != nil { + return err + } + + s.listeners.Lock() + if s.listeners.m == nil { + s.listeners.Unlock() + list.Close() + return ErrSwarmClosed + } + s.refs.Add(1) + s.listeners.m[list] = struct{}{} + s.listeners.cacheEOL = time.Time{} + s.listeners.Unlock() + + maddr := list.Multiaddr() + + // signal to our notifiees on listen. + s.notifyAll(func(n network.Notifiee) { + n.Listen(s, maddr) + }) + + go func() { + defer func() { + list.Close() + s.listeners.Lock() + delete(s.listeners.m, list) + s.listeners.cacheEOL = time.Time{} + s.listeners.Unlock() + + // signal to our notifiees on listen close. + s.notifyAll(func(n network.Notifiee) { + n.ListenClose(s, maddr) + }) + s.refs.Done() + }() + for { + c, err := list.Accept() + if err != nil { + if s.ctx.Err() == nil { + // only log if the swarm is still running. + log.Errorf("swarm listener accept error: %s", err) + } + return + } + + log.Debugf("swarm listener accepted connection: %s", c) + s.refs.Add(1) + go func() { + defer s.refs.Done() + _, err := s.addConn(c, network.DirInbound) + switch err { + case nil: + case ErrSwarmClosed: + // ignore. 
+ return + default: + log.Warnw("adding connection failed", "to", a, "error", err) + return + } + }() + } + }() + return nil +} diff --git a/swarm_net_test.go b/swarm_net_test.go new file mode 100644 index 0000000000000000000000000000000000000000..05984f6b025b9b38e71737e58d233835abf06b58 --- /dev/null +++ b/swarm_net_test.go @@ -0,0 +1,181 @@ +package swarm_test + +import ( + "context" + "fmt" + "io/ioutil" + "testing" + "time" + + "github.com/libp2p/go-libp2p-core/network" + + . "github.com/libp2p/go-libp2p-swarm/testing" +) + +// TestConnectednessCorrect starts a few networks, connects a few +// and tests Connectedness value is correct. +func TestConnectednessCorrect(t *testing.T) { + + ctx := context.Background() + + nets := make([]network.Network, 4) + for i := 0; i < 4; i++ { + nets[i] = GenSwarm(t, ctx) + } + + // connect 0-1, 0-2, 0-3, 1-2, 2-3 + + dial := func(a, b network.Network) { + DivulgeAddresses(b, a) + if _, err := a.DialPeer(ctx, b.LocalPeer()); err != nil { + t.Fatalf("Failed to dial: %s", err) + } + } + + dial(nets[0], nets[1]) + dial(nets[0], nets[3]) + dial(nets[1], nets[2]) + dial(nets[3], nets[2]) + + // The notifications for new connections get sent out asynchronously. + // There is the potential for a race condition here, so we sleep to ensure + // that they have been received. + time.Sleep(time.Millisecond * 100) + + // test those connected show up correctly + + // test connected + expectConnectedness(t, nets[0], nets[1], network.Connected) + expectConnectedness(t, nets[0], nets[3], network.Connected) + expectConnectedness(t, nets[1], nets[2], network.Connected) + expectConnectedness(t, nets[3], nets[2], network.Connected) + + // test not connected + expectConnectedness(t, nets[0], nets[2], network.NotConnected) + expectConnectedness(t, nets[1], nets[3], network.NotConnected) + + if len(nets[0].Peers()) != 2 { + t.Fatal("expected net 0 to have two peers") + } + + if len(nets[2].Peers()) != 2 { + t.Fatal("expected net 2 to have two peers") + } + + if len(nets[1].ConnsToPeer(nets[3].LocalPeer())) != 0 { + t.Fatal("net 1 should have no connections to net 3") + } + + if err := nets[2].ClosePeer(nets[1].LocalPeer()); err != nil { + t.Fatal(err) + } + + time.Sleep(time.Millisecond * 50) + + expectConnectedness(t, nets[2], nets[1], network.NotConnected) + + for _, n := range nets { + n.Close() + } + + for _, n := range nets { + <-n.Process().Closed() + } +} + +func expectConnectedness(t *testing.T, a, b network.Network, expected network.Connectedness) { + es := "%s is connected to %s, but Connectedness incorrect. 
%s %s %s" + atob := a.Connectedness(b.LocalPeer()) + btoa := b.Connectedness(a.LocalPeer()) + if atob != expected { + t.Errorf(es, a, b, printConns(a), printConns(b), atob) + } + + // test symmetric case + if btoa != expected { + t.Errorf(es, b, a, printConns(b), printConns(a), btoa) + } +} + +func printConns(n network.Network) string { + s := fmt.Sprintf("Connections in %s:\n", n) + for _, c := range n.Conns() { + s = s + fmt.Sprintf("- %s\n", c) + } + return s +} + +func TestNetworkOpenStream(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testString := "hello ipfs" + + nets := make([]network.Network, 4) + for i := 0; i < 4; i++ { + nets[i] = GenSwarm(t, ctx) + } + + dial := func(a, b network.Network) { + DivulgeAddresses(b, a) + if _, err := a.DialPeer(ctx, b.LocalPeer()); err != nil { + t.Fatalf("Failed to dial: %s", err) + } + } + + dial(nets[0], nets[1]) + dial(nets[0], nets[3]) + dial(nets[1], nets[2]) + + done := make(chan bool) + nets[1].SetStreamHandler(func(s network.Stream) { + defer close(done) + defer s.Close() + + buf, err := ioutil.ReadAll(s) + if err != nil { + t.Error(err) + return + } + if string(buf) != testString { + t.Error("got wrong message") + } + }) + + s, err := nets[0].NewStream(ctx, nets[1].LocalPeer()) + if err != nil { + t.Fatal(err) + } + + numStreams := 0 + for _, conn := range nets[0].ConnsToPeer(nets[1].LocalPeer()) { + numStreams += len(conn.GetStreams()) + } + + if numStreams != 1 { + t.Fatal("should only have one stream there") + } + + n, err := s.Write([]byte(testString)) + if err != nil { + t.Fatal(err) + } else if n != len(testString) { + t.Errorf("expected to write %d bytes, wrote %d", len(testString), n) + } + + err = s.Close() + if err != nil { + t.Fatal(err) + } + + select { + case <-done: + case <-time.After(time.Millisecond * 100): + t.Fatal("timed out waiting on stream") + } + + _, err = nets[1].NewStream(ctx, nets[3].LocalPeer()) + if err == nil { + t.Fatal("expected stream open 1->3 to fail") + } +} diff --git a/swarm_notif_test.go b/swarm_notif_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8537363a199d9c481b1388ccb22ec81e0d577a00 --- /dev/null +++ b/swarm_notif_test.go @@ -0,0 +1,228 @@ +package swarm_test + +import ( + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + + ma "github.com/multiformats/go-multiaddr" + + . "github.com/libp2p/go-libp2p-swarm" +) + +func TestNotifications(t *testing.T) { + const swarmSize = 5 + + notifiees := make([]*netNotifiee, swarmSize) + + ctx := context.Background() + swarms := makeSwarms(ctx, t, swarmSize) + defer func() { + for i, s := range swarms { + select { + case <-notifiees[i].listenClose: + t.Error("should not have been closed") + default: + } + err := s.Close() + if err != nil { + t.Error(err) + } + select { + case <-notifiees[i].listenClose: + default: + t.Error("expected a listen close notification") + } + } + }() + + timeout := 5 * time.Second + + // signup notifs + for i, swarm := range swarms { + n := newNetNotifiee(swarmSize) + swarm.Notify(n) + notifiees[i] = n + } + + connectSwarms(t, ctx, swarms) + + <-time.After(time.Millisecond) + // should've gotten 5 by now. 
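The select-with-timeout shape recurs throughout this test. A small helper capturing it (hypothetical; the test inlines the pattern instead):

```go
// recvConn receives one connection notification or fails the test after
// the given timeout.
func recvConn(t *testing.T, ch <-chan network.Conn, timeout time.Duration) network.Conn {
	t.Helper()
	select {
	case c := <-ch:
		return c
	case <-time.After(timeout):
		t.Fatal("timed out waiting for a connection notification")
		return nil // unreachable; t.Fatal stops the test
	}
}
```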
+ + // test everyone got the correct connection opened calls + for i, s := range swarms { + n := notifiees[i] + notifs := make(map[peer.ID][]network.Conn) + for j, s2 := range swarms { + if i == j { + continue + } + + // this feels a little sketchy, but its probably okay + for len(s.ConnsToPeer(s2.LocalPeer())) != len(notifs[s2.LocalPeer()]) { + select { + case c := <-n.connected: + nfp := notifs[c.RemotePeer()] + notifs[c.RemotePeer()] = append(nfp, c) + case <-time.After(timeout): + t.Fatal("timeout") + } + } + } + + for p, cons := range notifs { + expect := s.ConnsToPeer(p) + if len(expect) != len(cons) { + t.Fatal("got different number of connections") + } + + for _, c := range cons { + var found bool + for _, c2 := range expect { + if c == c2 { + found = true + break + } + } + + if !found { + t.Fatal("connection not found!") + } + } + } + } + + complement := func(c network.Conn) (*Swarm, *netNotifiee, *Conn) { + for i, s := range swarms { + for _, c2 := range s.Conns() { + if c.LocalMultiaddr().Equal(c2.RemoteMultiaddr()) && + c2.LocalMultiaddr().Equal(c.RemoteMultiaddr()) { + return s, notifiees[i], c2.(*Conn) + } + } + } + t.Fatal("complementary conn not found", c) + return nil, nil, nil + } + + testOCStream := func(n *netNotifiee, s network.Stream) { + var s2 network.Stream + select { + case s2 = <-n.openedStream: + t.Log("got notif for opened stream") + case <-time.After(timeout): + t.Fatal("timeout") + } + if s != s2 { + t.Fatal("got incorrect stream", s.Conn(), s2.Conn()) + } + + select { + case s2 = <-n.closedStream: + t.Log("got notif for closed stream") + case <-time.After(timeout): + t.Fatal("timeout") + } + if s != s2 { + t.Fatal("got incorrect stream", s.Conn(), s2.Conn()) + } + } + + streams := make(chan network.Stream) + for _, s := range swarms { + s.SetStreamHandler(func(s network.Stream) { + streams <- s + s.Reset() + }) + } + + // open a streams in each conn + for i, s := range swarms { + for _, c := range s.Conns() { + _, n2, _ := complement(c) + + st1, err := c.NewStream(context.Background()) + if err != nil { + t.Error(err) + } else { + st1.Write([]byte("hello")) + st1.Reset() + testOCStream(notifiees[i], st1) + st2 := <-streams + testOCStream(n2, st2) + } + } + } + + // close conns + for i, s := range swarms { + n := notifiees[i] + for _, c := range s.Conns() { + _, n2, c2 := complement(c) + c.Close() + c2.Close() + + var c3, c4 network.Conn + select { + case c3 = <-n.disconnected: + case <-time.After(timeout): + t.Fatal("timeout") + } + if c != c3 { + t.Fatal("got incorrect conn", c, c3) + } + + select { + case c4 = <-n2.disconnected: + case <-time.After(timeout): + t.Fatal("timeout") + } + if c2 != c4 { + t.Fatal("got incorrect conn", c, c2) + } + } + } +} + +type netNotifiee struct { + listen chan ma.Multiaddr + listenClose chan ma.Multiaddr + connected chan network.Conn + disconnected chan network.Conn + openedStream chan network.Stream + closedStream chan network.Stream +} + +func newNetNotifiee(buffer int) *netNotifiee { + return &netNotifiee{ + listen: make(chan ma.Multiaddr, buffer), + listenClose: make(chan ma.Multiaddr, buffer), + connected: make(chan network.Conn, buffer), + disconnected: make(chan network.Conn, buffer), + openedStream: make(chan network.Stream, buffer), + closedStream: make(chan network.Stream, buffer), + } +} + +func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) { + nn.listen <- a +} +func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr) { + nn.listenClose <- a +} +func (nn *netNotifiee) Connected(n 
network.Network, v network.Conn) {
+ nn.connected <- v
+}
+func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) {
+ nn.disconnected <- v
+}
+func (nn *netNotifiee) OpenedStream(n network.Network, v network.Stream) {
+ nn.openedStream <- v
+}
+func (nn *netNotifiee) ClosedStream(n network.Network, v network.Stream) {
+ nn.closedStream <- v
+}
diff --git a/swarm_stream.go b/swarm_stream.go
new file mode 100644
index 0000000000000000000000000000000000000000..a5f0738ad61196c8fe96e3974c590346fefbfdaf
--- /dev/null
+++ b/swarm_stream.go
@@ -0,0 +1,156 @@
+package swarm
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/libp2p/go-libp2p-core/mux"
+ "github.com/libp2p/go-libp2p-core/network"
+ "github.com/libp2p/go-libp2p-core/protocol"
+)
+
+// Validate Stream conforms to the go-libp2p-net Stream interface
+var _ network.Stream = &Stream{}
+
+// Stream is the stream type used by swarm. In general, you won't use this type
+// directly.
+type Stream struct {
+ id uint64
+
+ stream mux.MuxedStream
+ conn *Conn
+
+ closeOnce sync.Once
+
+ notifyLk sync.Mutex
+
+ protocol atomic.Value
+
+ stat network.Stat
+}
+
+func (s *Stream) ID() string {
+ // format: <conn id>-<stream id>
+ return fmt.Sprintf("%s-%d", s.conn.ID(), s.id)
+}
+
+func (s *Stream) String() string {
+ return fmt.Sprintf(
+ "<swarm.Stream[%s] %s (%s) <-> %s (%s)>",
+ s.conn.conn.Transport(),
+ s.conn.LocalMultiaddr(),
+ s.conn.LocalPeer(),
+ s.conn.RemoteMultiaddr(),
+ s.conn.RemotePeer(),
+ )
+}
+
+// Conn returns the Conn associated with this stream, as a network.Conn
+func (s *Stream) Conn() network.Conn {
+ return s.conn
+}
+
+// Read reads bytes from a stream.
+func (s *Stream) Read(p []byte) (int, error) {
+ n, err := s.stream.Read(p)
+ // TODO: push this down to a lower level for better accuracy.
+ if s.conn.swarm.bwc != nil {
+ s.conn.swarm.bwc.LogRecvMessage(int64(n))
+ s.conn.swarm.bwc.LogRecvMessageStream(int64(n), s.Protocol(), s.Conn().RemotePeer())
+ }
+ return n, err
+}
+
+// Write writes bytes to a stream, flushing for each call.
+func (s *Stream) Write(p []byte) (int, error) {
+ n, err := s.stream.Write(p)
+ // TODO: push this down to a lower level for better accuracy.
+ if s.conn.swarm.bwc != nil {
+ s.conn.swarm.bwc.LogSentMessage(int64(n))
+ s.conn.swarm.bwc.LogSentMessageStream(int64(n), s.Protocol(), s.Conn().RemotePeer())
+ }
+ return n, err
+}
+
+// Close closes the stream, closing both ends and freeing all associated
+// resources.
+func (s *Stream) Close() error {
+ err := s.stream.Close()
+ s.closeOnce.Do(s.remove)
+ return err
+}
+
+// Reset resets the stream, signaling an error on both ends and freeing all
+// associated resources.
+func (s *Stream) Reset() error {
+ err := s.stream.Reset()
+ s.closeOnce.Do(s.remove)
+ return err
+}
+
+// CloseWrite closes the stream for writing, flushing all data and sending an
+// EOF. This function does not free resources, call Close or Reset when done
+// with the stream.
+func (s *Stream) CloseWrite() error {
+ return s.stream.CloseWrite()
+}
+
+// CloseRead closes the stream for reading. This function does not free
+// resources, call Close or Reset when done with the stream.
+func (s *Stream) CloseRead() error {
+ return s.stream.CloseRead()
+}
+
+func (s *Stream) remove() {
+ s.conn.removeStream(s)
+
+ // We *must* do this in a goroutine. This can be called during an
+ // open notification and will block until that notification is done.
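+ // Taking notifyLk inside the goroutine (rather than synchronously here)
+ // avoids deadlocking against the open notification, which holds the same
+ // lock, and guarantees ClosedStream fires only after OpenedStream is done.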
+ go func() {
+ s.notifyLk.Lock()
+ defer s.notifyLk.Unlock()
+
+ s.conn.swarm.notifyAll(func(f network.Notifiee) {
+ f.ClosedStream(s.conn.swarm, s)
+ })
+ s.conn.swarm.refs.Done()
+ }()
+}
+
+// Protocol returns the protocol negotiated on this stream (if set).
+func (s *Stream) Protocol() protocol.ID {
+ // Ignore type error. It means that the protocol is unset.
+ p, _ := s.protocol.Load().(protocol.ID)
+ return p
+}
+
+// SetProtocol sets the protocol for this stream.
+//
+// This doesn't actually *do* anything other than record the fact that we're
+// speaking the given protocol over this stream. It's still up to the user to
+// negotiate the protocol. This is usually done by the Host.
+func (s *Stream) SetProtocol(p protocol.ID) {
+ s.protocol.Store(p)
+}
+
+// SetDeadline sets the read and write deadlines for this stream.
+func (s *Stream) SetDeadline(t time.Time) error {
+ return s.stream.SetDeadline(t)
+}
+
+// SetReadDeadline sets the read deadline for this stream.
+func (s *Stream) SetReadDeadline(t time.Time) error {
+ return s.stream.SetReadDeadline(t)
+}
+
+// SetWriteDeadline sets the write deadline for this stream.
+func (s *Stream) SetWriteDeadline(t time.Time) error {
+ return s.stream.SetWriteDeadline(t)
+}
+
+// Stat returns metadata information for this stream.
+func (s *Stream) Stat() network.Stat {
+ return s.stat
+}
diff --git a/swarm_test.go b/swarm_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..a94281b1f516e56b928e895c546f2e5fb39f4d6a
--- /dev/null
+++ b/swarm_test.go
@@ -0,0 +1,476 @@
+package swarm_test
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p-core/control"
+ "github.com/libp2p/go-libp2p-core/network"
+ "github.com/libp2p/go-libp2p-core/peer"
+ "github.com/libp2p/go-libp2p-core/peerstore"
+
+ . "github.com/libp2p/go-libp2p-swarm"
+ . "github.com/libp2p/go-libp2p-swarm/testing"
+
+ logging "github.com/ipfs/go-log"
+ ma "github.com/multiformats/go-multiaddr"
+ manet "github.com/multiformats/go-multiaddr/net"
+ "github.com/stretchr/testify/require"
+)
+
+var log = logging.Logger("swarm_test")
+
+func EchoStreamHandler(stream network.Stream) {
+ go func() {
+ defer stream.Close()
+
+ // pull out the ipfs conn
+ c := stream.Conn()
+ log.Infof("%s ponging to %s", c.LocalPeer(), c.RemotePeer())
+
+ buf := make([]byte, 4)
+
+ for {
+ if _, err := stream.Read(buf); err != nil {
+ if err != io.EOF {
+ log.Error("ping receive error:", err)
+ }
+ return
+ }
+
+ if !bytes.Equal(buf, []byte("ping")) {
+ log.Errorf("ping receive error: ping != %s %v", buf, buf)
+ return
+ }
+
+ if _, err := stream.Write([]byte("pong")); err != nil {
+ log.Error("pong send error:", err)
+ return
+ }
+ }
+ }()
+}
+
+func makeDialOnlySwarm(ctx context.Context, t *testing.T) *Swarm {
+ swarm := GenSwarm(t, ctx, OptDialOnly)
+ swarm.SetStreamHandler(EchoStreamHandler)
+
+ return swarm
+}
+
+func makeSwarms(ctx context.Context, t *testing.T, num int, opts ...Option) []*Swarm {
+ swarms := make([]*Swarm, 0, num)
+
+ for i := 0; i < num; i++ {
+ swarm := GenSwarm(t, ctx, opts...)
+ swarm.SetStreamHandler(EchoStreamHandler)
+ swarms = append(swarms, swarm)
+ }
+
+ return swarms
+}
+
+func connectSwarms(t *testing.T, ctx context.Context, swarms []*Swarm) {
+
+ var wg sync.WaitGroup
+ connect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {
+ // TODO: make a DialAddr func.
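+ // Until then, seed the peerstore manually so DialPeer can resolve
+ // the destination to this address.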
+ s.Peerstore().AddAddr(dst, addr, peerstore.PermanentAddrTTL) + if _, err := s.DialPeer(ctx, dst); err != nil { + t.Fatal("error swarm dialing to peer", err) + } + wg.Done() + } + + log.Info("Connecting swarms simultaneously.") + for i, s1 := range swarms { + for _, s2 := range swarms[i+1:] { + wg.Add(1) + connect(s1, s2.LocalPeer(), s2.ListenAddresses()[0]) // try the first. + } + } + wg.Wait() + + for _, s := range swarms { + log.Infof("%s swarm routing table: %s", s.LocalPeer(), s.Peers()) + } +} + +func SubtestSwarm(t *testing.T, SwarmNum int, MsgNum int) { + // t.Skip("skipping for another test") + + ctx := context.Background() + swarms := makeSwarms(ctx, t, SwarmNum, OptDisableReuseport) + + // connect everyone + connectSwarms(t, ctx, swarms) + + // ping/pong + for _, s1 := range swarms { + log.Debugf("-------------------------------------------------------") + log.Debugf("%s ping pong round", s1.LocalPeer()) + log.Debugf("-------------------------------------------------------") + + _, cancel := context.WithCancel(ctx) + got := map[peer.ID]int{} + errChan := make(chan error, MsgNum*len(swarms)) + streamChan := make(chan network.Stream, MsgNum) + + // send out "ping" x MsgNum to every peer + go func() { + defer close(streamChan) + + var wg sync.WaitGroup + send := func(p peer.ID) { + defer wg.Done() + + // first, one stream per peer (nice) + stream, err := s1.NewStream(ctx, p) + if err != nil { + errChan <- err + return + } + + // send out ping! + for k := 0; k < MsgNum; k++ { // with k messages + msg := "ping" + log.Debugf("%s %s %s (%d)", s1.LocalPeer(), msg, p, k) + if _, err := stream.Write([]byte(msg)); err != nil { + errChan <- err + continue + } + } + + // read it later + streamChan <- stream + } + + for _, s2 := range swarms { + if s2.LocalPeer() == s1.LocalPeer() { + continue // dont send to self... 
+ }
+
+ wg.Add(1)
+ go send(s2.LocalPeer())
+ }
+ wg.Wait()
+ }()
+
+ // receive "pong" x MsgNum from every peer
+ go func() {
+ defer close(errChan)
+ count := 0
+ countShouldBe := MsgNum * (len(swarms) - 1)
+ for stream := range streamChan { // one per peer
+ // get peer on the other side
+ p := stream.Conn().RemotePeer()
+
+ // receive pings
+ msgCount := 0
+ msg := make([]byte, 4)
+ for k := 0; k < MsgNum; k++ { // with k messages
+
+ // read from the stream
+ if _, err := stream.Read(msg); err != nil {
+ errChan <- err
+ continue
+ }
+
+ if string(msg) != "pong" {
+ errChan <- fmt.Errorf("unexpected message: %s", msg)
+ continue
+ }
+
+ log.Debugf("%s %s %s (%d)", s1.LocalPeer(), msg, p, k)
+ msgCount++
+ }
+
+ got[p] = msgCount
+ count += msgCount
+ stream.Close()
+ }
+
+ if count != countShouldBe {
+ errChan <- fmt.Errorf("count mismatch: %d != %d", count, countShouldBe)
+ }
+ }()
+
+ // check any errors (blocks till consumer is done)
+ for err := range errChan {
+ if err != nil {
+ t.Error(err.Error())
+ }
+ }
+
+ log.Debugf("%s got pongs", s1.LocalPeer())
+ if (len(swarms) - 1) != len(got) {
+ t.Errorf("got pongs from %d peers, expected %d.", len(got), len(swarms)-1)
+ }
+
+ for p, n := range got {
+ if n != MsgNum {
+ t.Error("peer did not get all msgs", p, n, "/", MsgNum)
+ }
+ }
+
+ cancel()
+ <-time.After(10 * time.Millisecond)
+ }
+
+ for _, s := range swarms {
+ s.Close()
+ }
+}
+
+func TestSwarm(t *testing.T) {
+ // t.Skip("skipping for another test")
+ t.Parallel()
+
+ // msgs := 1000
+ msgs := 100
+ swarms := 5
+ SubtestSwarm(t, swarms, msgs)
+}
+
+func TestBasicSwarm(t *testing.T) {
+ // t.Skip("skipping for another test")
+ t.Parallel()
+
+ msgs := 1
+ swarms := 2
+ SubtestSwarm(t, swarms, msgs)
+}
+
+func TestConnHandler(t *testing.T) {
+ // t.Skip("skipping for another test")
+ t.Parallel()
+
+ ctx := context.Background()
+ swarms := makeSwarms(ctx, t, 5)
+
+ gotconn := make(chan struct{}, 10)
+ swarms[0].SetConnHandler(func(conn network.Conn) {
+ gotconn <- struct{}{}
+ })
+
+ connectSwarms(t, ctx, swarms)
+
+ <-time.After(time.Millisecond)
+ // should've gotten 4 connections by now (one per other swarm).
+ + swarms[0].SetConnHandler(nil) + + expect := 4 + for i := 0; i < expect; i++ { + select { + case <-time.After(time.Second): + t.Fatal("failed to get connections") + case <-gotconn: + } + } + + select { + case <-gotconn: + t.Fatalf("should have connected to %d swarms, got an extra.", expect) + default: + } +} + +func TestConnectionGating(t *testing.T) { + ctx := context.Background() + tcs := map[string]struct { + p1Gater func(gater *MockConnectionGater) *MockConnectionGater + p2Gater func(gater *MockConnectionGater) *MockConnectionGater + + p1ConnectednessToP2 network.Connectedness + p2ConnectednessToP1 network.Connectedness + isP1OutboundErr bool + disableOnQUIC bool + }{ + "no gating": { + p1ConnectednessToP2: network.Connected, + p2ConnectednessToP1: network.Connected, + isP1OutboundErr: false, + }, + "p1 gates outbound peer dial": { + p1Gater: func(c *MockConnectionGater) *MockConnectionGater { + c.PeerDial = func(p peer.ID) bool { return false } + return c + }, + p1ConnectednessToP2: network.NotConnected, + p2ConnectednessToP1: network.NotConnected, + isP1OutboundErr: true, + }, + "p1 gates outbound addr dialing": { + p1Gater: func(c *MockConnectionGater) *MockConnectionGater { + c.Dial = func(p peer.ID, addr ma.Multiaddr) bool { return false } + return c + }, + p1ConnectednessToP2: network.NotConnected, + p2ConnectednessToP1: network.NotConnected, + isP1OutboundErr: true, + }, + "p2 accepts inbound peer dial if outgoing dial is gated": { + p2Gater: func(c *MockConnectionGater) *MockConnectionGater { + c.Dial = func(peer.ID, ma.Multiaddr) bool { return false } + return c + }, + p1ConnectednessToP2: network.Connected, + p2ConnectednessToP1: network.Connected, + isP1OutboundErr: false, + }, + "p2 gates inbound peer dial before securing": { + p2Gater: func(c *MockConnectionGater) *MockConnectionGater { + c.Accept = func(c network.ConnMultiaddrs) bool { return false } + return c + }, + p1ConnectednessToP2: network.NotConnected, + p2ConnectednessToP1: network.NotConnected, + isP1OutboundErr: true, + // QUIC gates the connection after completion of the handshake + disableOnQUIC: true, + }, + "p2 gates inbound peer dial before multiplexing": { + p1Gater: func(c *MockConnectionGater) *MockConnectionGater { + c.Secured = func(network.Direction, peer.ID, network.ConnMultiaddrs) bool { return false } + return c + }, + p1ConnectednessToP2: network.NotConnected, + p2ConnectednessToP1: network.NotConnected, + isP1OutboundErr: true, + }, + "p2 gates inbound peer dial after upgrading": { + p1Gater: func(c *MockConnectionGater) *MockConnectionGater { + c.Upgraded = func(c network.Conn) (bool, control.DisconnectReason) { return false, 0 } + return c + }, + p1ConnectednessToP2: network.NotConnected, + p2ConnectednessToP1: network.NotConnected, + isP1OutboundErr: true, + }, + "p2 gates outbound dials": { + p2Gater: func(c *MockConnectionGater) *MockConnectionGater { + c.PeerDial = func(p peer.ID) bool { return false } + return c + }, + p1ConnectednessToP2: network.Connected, + p2ConnectednessToP1: network.Connected, + isP1OutboundErr: false, + }, + } + + for n, tc := range tcs { + for _, useQuic := range []bool{false, true} { + trString := "TCP" + optTransport := OptDisableQUIC + if useQuic { + if tc.disableOnQUIC { + continue + } + trString = "QUIC" + optTransport = OptDisableTCP + } + t.Run(fmt.Sprintf("%s %s", n, trString), func(t *testing.T) { + p1Gater := DefaultMockConnectionGater() + p2Gater := DefaultMockConnectionGater() + if tc.p1Gater != nil { + p1Gater = tc.p1Gater(p1Gater) + } + if 
tc.p2Gater != nil {
+ p2Gater = tc.p2Gater(p2Gater)
+ }
+
+ sw1 := GenSwarm(t, ctx, OptConnGater(p1Gater), optTransport)
+ sw2 := GenSwarm(t, ctx, OptConnGater(p2Gater), optTransport)
+
+ p1 := sw1.LocalPeer()
+ p2 := sw2.LocalPeer()
+ sw1.Peerstore().AddAddr(p2, sw2.ListenAddresses()[0], peerstore.PermanentAddrTTL)
+ // 1 -> 2
+ _, err := sw1.DialPeer(ctx, p2)
+
+ require.Equal(t, tc.isP1OutboundErr, err != nil, n)
+ require.Equal(t, tc.p1ConnectednessToP2, sw1.Connectedness(p2), n)
+
+ require.Eventually(t, func() bool {
+ return tc.p2ConnectednessToP1 == sw2.Connectedness(p1)
+ }, 2*time.Second, 100*time.Millisecond, n)
+ })
+ }
+ }
+}
+
+func TestNoDial(t *testing.T) {
+ ctx := context.Background()
+ swarms := makeSwarms(ctx, t, 2)
+
+ _, err := swarms[0].NewStream(network.WithNoDial(ctx, "swarm test"), swarms[1].LocalPeer())
+ if err != network.ErrNoConn {
+ t.Fatal("should have failed with ErrNoConn")
+ }
+}
+
+func TestCloseWithOpenStreams(t *testing.T) {
+ ctx := context.Background()
+ swarms := makeSwarms(ctx, t, 2)
+ connectSwarms(t, ctx, swarms)
+
+ s, err := swarms[0].NewStream(ctx, swarms[1].LocalPeer())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer s.Close()
+ // close swarm before stream.
+ err = swarms[0].Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestTypedNilConn(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ s := GenSwarm(t, ctx)
+ defer s.Close()
+
+ // We can't dial ourselves.
+ c, err := s.DialPeer(ctx, s.LocalPeer())
+ require.Error(t, err)
+ // If we fail to dial, the connection should be nil.
+ require.True(t, c == nil)
+}
+
+func TestPreventDialListenAddr(t *testing.T) {
+ s := GenSwarm(t, context.Background(), OptDialOnly)
+ if err := s.Listen(ma.StringCast("/ip4/0.0.0.0/udp/0/quic")); err != nil {
+ t.Fatal(err)
+ }
+ addrs, err := s.InterfaceListenAddresses()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var addr ma.Multiaddr
+ for _, a := range addrs {
+ _, s, err := manet.DialArgs(a)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if strings.Split(s, ":")[0] == "127.0.0.1" {
+ addr = a
+ break
+ }
+ }
+ remote := peer.ID("foobar")
+ s.Peerstore().AddAddr(remote, addr, time.Hour)
+ _, err = s.DialPeer(context.Background(), remote)
+ if !errors.Is(err, ErrNoGoodAddresses) {
+ t.Fatalf("expected dial to fail with ErrNoGoodAddresses, got: %v", err)
+ }
+}
diff --git a/swarm_transport.go b/swarm_transport.go
new file mode 100644
index 0000000000000000000000000000000000000000..21728ac3b5114b9a99c7f04a4ec1447131722503
--- /dev/null
+++ b/swarm_transport.go
@@ -0,0 +1,111 @@
+package swarm
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/libp2p/go-libp2p-core/transport"
+
+ ma "github.com/multiformats/go-multiaddr"
+)
+
+// TransportForDialing retrieves the appropriate transport for dialing the given
+// multiaddr.
+func (s *Swarm) TransportForDialing(a ma.Multiaddr) transport.Transport {
+ protocols := a.Protocols()
+ if len(protocols) == 0 {
+ return nil
+ }
+
+ s.transports.RLock()
+ defer s.transports.RUnlock()
+ if len(s.transports.m) == 0 {
+ // make sure we're not just shutting down.
+ if s.transports.m != nil {
+ log.Error("you have no transports configured")
+ }
+ return nil
+ }
+
+ for _, p := range protocols {
+ transport, ok := s.transports.m[p.Code]
+ if !ok {
+ continue
+ }
+ if transport.Proxy() {
+ return transport
+ }
+ }
+
+ return s.transports.m[protocols[len(protocols)-1].Code]
+}
+
+// TransportForListening retrieves the appropriate transport for listening on
+// the given multiaddr.
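+//
+// Like TransportForDialing, it prefers a registered proxy transport when one
+// matches a component of the address, and otherwise uses the transport
+// registered for the address's last protocol component.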
+func (s *Swarm) TransportForListening(a ma.Multiaddr) transport.Transport { + protocols := a.Protocols() + if len(protocols) == 0 { + return nil + } + + s.transports.RLock() + defer s.transports.RUnlock() + if len(s.transports.m) == 0 { + // make sure we're not just shutting down. + if s.transports.m != nil { + log.Error("you have no transports configured") + } + return nil + } + + selected := s.transports.m[protocols[len(protocols)-1].Code] + for _, p := range protocols { + transport, ok := s.transports.m[p.Code] + if !ok { + continue + } + if transport.Proxy() { + selected = transport + } + } + return selected +} + +// AddTransport adds a transport to this swarm. +// +// Satisfies the Network interface from go-libp2p-transport. +func (s *Swarm) AddTransport(t transport.Transport) error { + protocols := t.Protocols() + + if len(protocols) == 0 { + return fmt.Errorf("useless transport handles no protocols: %T", t) + } + + s.transports.Lock() + defer s.transports.Unlock() + if s.transports.m == nil { + return ErrSwarmClosed + } + var registered []string + for _, p := range protocols { + if _, ok := s.transports.m[p]; ok { + proto := ma.ProtocolWithCode(p) + name := proto.Name + if name == "" { + name = fmt.Sprintf("unknown (%d)", p) + } + registered = append(registered, name) + } + } + if len(registered) > 0 { + return fmt.Errorf( + "transports already registered for protocol(s): %s", + strings.Join(registered, ", "), + ) + } + + for _, p := range protocols { + s.transports.m[p] = t + } + return nil +} diff --git a/testing/testing.go b/testing/testing.go new file mode 100644 index 0000000000000000000000000000000000000000..313e3d771e3a739f733343b668a43506e77c6cf7 --- /dev/null +++ b/testing/testing.go @@ -0,0 +1,217 @@ +package testing + +import ( + "context" + "testing" + + csms "github.com/libp2p/go-conn-security-multistream" + "github.com/libp2p/go-libp2p-core/connmgr" + "github.com/libp2p/go-libp2p-core/control" + "github.com/libp2p/go-libp2p-core/crypto" + "github.com/libp2p/go-libp2p-core/metrics" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/peerstore" + "github.com/libp2p/go-libp2p-core/sec/insecure" + "github.com/libp2p/go-libp2p-peerstore/pstoremem" + quic "github.com/libp2p/go-libp2p-quic-transport" + swarm "github.com/libp2p/go-libp2p-swarm" + "github.com/libp2p/go-libp2p-testing/net" + tptu "github.com/libp2p/go-libp2p-transport-upgrader" + yamux "github.com/libp2p/go-libp2p-yamux" + msmux "github.com/libp2p/go-stream-muxer-multistream" + "github.com/libp2p/go-tcp-transport" + + "github.com/jbenet/goprocess" + ma "github.com/multiformats/go-multiaddr" +) + +type config struct { + disableReuseport bool + dialOnly bool + disableTCP bool + disableQUIC bool + connectionGater connmgr.ConnectionGater + sk crypto.PrivKey +} + +// Option is an option that can be passed when constructing a test swarm. +type Option func(*testing.T, *config) + +// OptDisableReuseport disables reuseport in this test swarm. +var OptDisableReuseport Option = func(_ *testing.T, c *config) { + c.disableReuseport = true +} + +// OptDialOnly prevents the test swarm from listening. +var OptDialOnly Option = func(_ *testing.T, c *config) { + c.dialOnly = true +} + +// OptDisableTCP disables TCP. +var OptDisableTCP Option = func(_ *testing.T, c *config) { + c.disableTCP = true +} + +// OptDisableQUIC disables QUIC. 
+var OptDisableQUIC Option = func(_ *testing.T, c *config) { + c.disableQUIC = true +} + +// OptConnGater configures the given connection gater on the test +func OptConnGater(cg connmgr.ConnectionGater) Option { + return func(_ *testing.T, c *config) { + c.connectionGater = cg + } +} + +// OptPeerPrivateKey configures the peer private key which is then used to derive the public key and peer ID. +func OptPeerPrivateKey(sk crypto.PrivKey) Option { + return func(_ *testing.T, c *config) { + c.sk = sk + } +} + +// GenUpgrader creates a new connection upgrader for use with this swarm. +func GenUpgrader(n *swarm.Swarm) *tptu.Upgrader { + id := n.LocalPeer() + pk := n.Peerstore().PrivKey(id) + secMuxer := new(csms.SSMuxer) + secMuxer.AddTransport(insecure.ID, insecure.NewWithIdentity(id, pk)) + + stMuxer := msmux.NewBlankTransport() + stMuxer.AddTransport("/yamux/1.0.0", yamux.DefaultTransport) + + return &tptu.Upgrader{ + Secure: secMuxer, + Muxer: stMuxer, + } + +} + +// GenSwarm generates a new test swarm. +func GenSwarm(t *testing.T, ctx context.Context, opts ...Option) *swarm.Swarm { + var cfg config + for _, o := range opts { + o(t, &cfg) + } + + var p tnet.PeerNetParams + if cfg.sk == nil { + p = tnet.RandPeerNetParamsOrFatal(t) + } else { + pk := cfg.sk.GetPublic() + id, err := peer.IDFromPublicKey(pk) + if err != nil { + t.Fatal(err) + } + p.PrivKey = cfg.sk + p.PubKey = pk + p.ID = id + p.Addr = tnet.ZeroLocalTCPAddress + } + + ps := pstoremem.NewPeerstore() + ps.AddPubKey(p.ID, p.PubKey) + ps.AddPrivKey(p.ID, p.PrivKey) + s := swarm.NewSwarm(ctx, p.ID, ps, metrics.NewBandwidthCounter(), cfg.connectionGater) + + // Call AddChildNoWait because we can't call AddChild after the process + // may have been closed (e.g., if the context was canceled). + s.Process().AddChildNoWait(goprocess.WithTeardown(ps.Close)) + + upgrader := GenUpgrader(s) + upgrader.ConnGater = cfg.connectionGater + + if !cfg.disableTCP { + tcpTransport := tcp.NewTCPTransport(upgrader) + tcpTransport.DisableReuseport = cfg.disableReuseport + if err := s.AddTransport(tcpTransport); err != nil { + t.Fatal(err) + } + if !cfg.dialOnly { + if err := s.Listen(p.Addr); err != nil { + t.Fatal(err) + } + } + } + if !cfg.disableQUIC { + quicTransport, err := quic.NewTransport(p.PrivKey, nil, cfg.connectionGater) + if err != nil { + t.Fatal(err) + } + if err := s.AddTransport(quicTransport); err != nil { + t.Fatal(err) + } + if !cfg.dialOnly { + if err := s.Listen(ma.StringCast("/ip4/127.0.0.1/udp/0/quic")); err != nil { + t.Fatal(err) + } + } + } + if !cfg.dialOnly { + s.Peerstore().AddAddrs(p.ID, s.ListenAddresses(), peerstore.PermanentAddrTTL) + } + return s +} + +// DivulgeAddresses adds swarm a's addresses to swarm b's peerstore. +func DivulgeAddresses(a, b network.Network) { + id := a.LocalPeer() + addrs := a.Peerstore().Addrs(id) + b.Peerstore().AddAddrs(id, addrs, peerstore.PermanentAddrTTL) +} + +// MockConnectionGater is a mock connection gater to be used by the tests. 
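+// Each callback defaults to allowing everything (see DefaultMockConnectionGater
+// below); tests override individual callbacks to gate a specific phase of
+// connection establishment.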
+type MockConnectionGater struct { + Dial func(p peer.ID, addr ma.Multiaddr) bool + PeerDial func(p peer.ID) bool + Accept func(c network.ConnMultiaddrs) bool + Secured func(network.Direction, peer.ID, network.ConnMultiaddrs) bool + Upgraded func(c network.Conn) (bool, control.DisconnectReason) +} + +func DefaultMockConnectionGater() *MockConnectionGater { + m := &MockConnectionGater{} + m.Dial = func(p peer.ID, addr ma.Multiaddr) bool { + return true + } + + m.PeerDial = func(p peer.ID) bool { + return true + } + + m.Accept = func(c network.ConnMultiaddrs) bool { + return true + } + + m.Secured = func(network.Direction, peer.ID, network.ConnMultiaddrs) bool { + return true + } + + m.Upgraded = func(c network.Conn) (bool, control.DisconnectReason) { + return true, 0 + } + + return m +} + +func (m *MockConnectionGater) InterceptAddrDial(p peer.ID, addr ma.Multiaddr) (allow bool) { + return m.Dial(p, addr) +} + +func (m *MockConnectionGater) InterceptPeerDial(p peer.ID) (allow bool) { + return m.PeerDial(p) +} + +func (m *MockConnectionGater) InterceptAccept(c network.ConnMultiaddrs) (allow bool) { + return m.Accept(c) +} + +func (m *MockConnectionGater) InterceptSecured(d network.Direction, p peer.ID, c network.ConnMultiaddrs) (allow bool) { + return m.Secured(d, p, c) +} + +func (m *MockConnectionGater) InterceptUpgraded(tc network.Conn) (allow bool, reason control.DisconnectReason) { + return m.Upgraded(tc) +} diff --git a/transport_test.go b/transport_test.go new file mode 100644 index 0000000000000000000000000000000000000000..82225840b9b0947cf38e4ec3adbb9d920b828ba7 --- /dev/null +++ b/transport_test.go @@ -0,0 +1,80 @@ +package swarm_test + +import ( + "context" + "testing" + + swarm "github.com/libp2p/go-libp2p-swarm" + swarmt "github.com/libp2p/go-libp2p-swarm/testing" + + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/transport" + ma "github.com/multiformats/go-multiaddr" +) + +type dummyTransport struct { + protocols []int + proxy bool + closed bool +} + +func (dt *dummyTransport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (transport.CapableConn, error) { + panic("unimplemented") +} + +func (dt *dummyTransport) CanDial(addr ma.Multiaddr) bool { + panic("unimplemented") +} + +func (dt *dummyTransport) Listen(laddr ma.Multiaddr) (transport.Listener, error) { + panic("unimplemented") +} + +func (dt *dummyTransport) Proxy() bool { + return dt.proxy +} + +func (dt *dummyTransport) Protocols() []int { + return dt.protocols +} +func (dt *dummyTransport) Close() error { + dt.closed = true + return nil +} + +func TestUselessTransport(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + s := swarmt.GenSwarm(t, ctx) + err := s.AddTransport(new(dummyTransport)) + if err == nil { + t.Fatal("adding a transport that supports no protocols should have failed") + } +} + +func TestTransportClose(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + s := swarmt.GenSwarm(t, ctx) + tpt := &dummyTransport{protocols: []int{1}} + if err := s.AddTransport(tpt); err != nil { + t.Fatal(err) + } + _ = s.Close() + if !tpt.closed { + t.Fatal("expected transport to be closed") + } + +} + +func TestTransportAfterClose(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + s := swarmt.GenSwarm(t, ctx) + s.Close() + + tpt := &dummyTransport{protocols: []int{1}} + if err := s.AddTransport(tpt); err != swarm.ErrSwarmClosed { + t.Fatal("expected swarm closed 
error, got: ", err) + } +} diff --git a/util_test.go b/util_test.go new file mode 100644 index 0000000000000000000000000000000000000000..11124adb27651a20ed24a00f4691c4202f090f81 --- /dev/null +++ b/util_test.go @@ -0,0 +1,53 @@ +package swarm + +import ( + "fmt" + "testing" + + "github.com/libp2p/go-libp2p-core/test" + ma "github.com/multiformats/go-multiaddr" + + "github.com/stretchr/testify/require" +) + +func TestIsFdConsuming(t *testing.T) { + tcs := map[string]struct { + addr string + isFdConsuming bool + }{ + "tcp": { + addr: "/ip4/127.0.0.1/tcp/20", + isFdConsuming: true, + }, + "quic": { + addr: "/ip4/127.0.0.1/udp/0/quic", + isFdConsuming: false, + }, + "addr-without-registered-transport": { + addr: "/ip4/127.0.0.1/tcp/20/ws", + isFdConsuming: true, + }, + "relay-tcp": { + addr: fmt.Sprintf("/ip4/127.0.0.1/tcp/20/p2p-circuit/p2p/%s", test.RandPeerIDFatal(t)), + isFdConsuming: true, + }, + "relay-quic": { + addr: fmt.Sprintf("/ip4/127.0.0.1/udp/20/quic/p2p-circuit/p2p/%s", test.RandPeerIDFatal(t)), + isFdConsuming: false, + }, + "relay-without-serveraddr": { + addr: fmt.Sprintf("/p2p-circuit/p2p/%s", test.RandPeerIDFatal(t)), + isFdConsuming: true, + }, + "relay-without-registered-transport-server": { + addr: fmt.Sprintf("/ip4/127.0.0.1/tcp/20/ws/p2p-circuit/p2p/%s", test.RandPeerIDFatal(t)), + isFdConsuming: true, + }, + } + + for name := range tcs { + maddr, err := ma.NewMultiaddr(tcs[name].addr) + require.NoError(t, err, name) + require.Equal(t, tcs[name].isFdConsuming, isFdConsumingAddr(maddr), name) + } +}
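
Taken together, the testing helpers added in this patch make end-to-end experiments short. The sketch below is illustrative only (it is not part of the patch, and the test name is hypothetical); it spins up two test swarms, divulges addresses, and sends one message over a stream, mirroring TestNetworkOpenStream above:

package swarm_test

import (
	"context"
	"io/ioutil"
	"testing"

	"github.com/libp2p/go-libp2p-core/network"

	. "github.com/libp2p/go-libp2p-swarm/testing"
)

func TestTwoSwarmEchoSketch(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	a := GenSwarm(t, ctx) // dialer
	b := GenSwarm(t, ctx) // listener

	// Tell a about b's listen addresses so it can dial by peer ID.
	DivulgeAddresses(b, a)

	done := make(chan struct{})
	b.SetStreamHandler(func(s network.Stream) {
		defer close(done)
		defer s.Close()
		msg, err := ioutil.ReadAll(s) // returns once a closes its end
		if err != nil || string(msg) != "hello" {
			t.Errorf("bad message %q: %v", msg, err)
		}
	})

	// NewStream dials b if there is no existing connection.
	s, err := a.NewStream(ctx, b.LocalPeer())
	if err != nil {
		t.Fatal(err)
	}
	if _, err := s.Write([]byte("hello")); err != nil {
		t.Fatal(err)
	}
	s.Close() // flush and send EOF so ReadAll on the other side returns
	<-done
}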