package swarm

import (
	"errors"
	"fmt"
	"net"
	"sync"
	"time"

	conn "github.com/jbenet/go-ipfs/p2p/net/conn"
	addrutil "github.com/jbenet/go-ipfs/p2p/net/swarm/addr"
	peer "github.com/jbenet/go-ipfs/p2p/peer"
	lgbl "github.com/jbenet/go-ipfs/util/eventlog/loggables"

	context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
	ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
	manet "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net"
)

// Diagram of dial sync:
//
//   many callers of Dial()   synched w.  dials many addrs       results to callers
//  ----------------------\    dialsync    use earliest            /--------------
//  -----------------------\              |----------\           /----------------
//  ------------------------>------------<-------     >---------<-----------------
//  -----------------------|              \----x                 \----------------
//  ----------------------|                \-----x                \---------------
//                                         any may fail          if no addr at end
//                                                             retry dialAttempt x

// dialAttempts governs how many times a goroutine will try to dial a given peer.
const dialAttempts = 3

// DialTimeout is the amount of time each dial attempt has. We can think about making
// this larger down the road, or putting more granular timeouts (e.g. within each
// subcomponent of Dial).
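//
// A minimal sketch of how a client might tune this before constructing a
// Swarm (hypothetical caller code, not part of this package):
//
//   swarm.DialTimeout = 30 * time.Second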
var DialTimeout time.Duration = time.Second * 10

// dialsync is a small object that helps manage ongoing dials.
// this way, if we receive many simultaneous dial requests, one
// can do its thing, while the rest wait.
//
// its Lock/Unlock interface is so would-be dialers can just:
//
//  for {
//  	c := findConnectionToPeer(peer)
//  	if c != nil {
//  		return c
//  	}
//
//  	// ok, no connections. should we dial?
//  	if ok, wait := dialsync.Lock(peer); !ok {
//  		<-wait // can optionally wait
//  		continue
//  	}
//  	defer dialsync.Unlock(peer)
//
//  	c := actuallyDial(peer)
//  	return c
//  }
//
type dialsync struct {
	// ongoing is a map of tickets for the current peers being dialed.
	// this way, we don't kick off N dials simultaneously.
	ongoing map[peer.ID]chan struct{}
	lock    sync.Mutex
}

// Lock governs the beginning of a dial attempt.
// If there are no ongoing dials, it returns true, and the client is now
// scheduled to dial. Every other goroutine that calls Lock -- with the
// same dst -- will block until the client is done. The client MUST call
// ds.Unlock(p) when it is done, to unblock the other callers.
// The client is not responsible for achieving a successful dial, only for
// reporting the end of the attempt (calling ds.Unlock(p)).
//
// see the example in the `dialsync` doc comment above.
func (ds *dialsync) Lock(dst peer.ID) (bool, chan struct{}) {
	ds.lock.Lock()
	if ds.ongoing == nil { // init if not ready
		ds.ongoing = make(map[peer.ID]chan struct{})
	}
	wait, found := ds.ongoing[dst]
	if !found {
		ds.ongoing[dst] = make(chan struct{})
	}
	ds.lock.Unlock()

	if found {
		return false, wait
	}

	// ok! you're signed up to dial!
	return true, nil
}

// Unlock releases waiters to a dial attempt. see Lock.
// if Unlock(p) is called without calling Lock(p) first, Unlock panics.
func (ds *dialsync) Unlock(dst peer.ID) {
	ds.lock.Lock()
	wait, found := ds.ongoing[dst]
	if !found {
		panic("called dialDone with no ongoing dials to peer: " + dst.Pretty())
	}
	delete(ds.ongoing, dst) // remove ongoing dial
	close(wait)             // release everyone else
	ds.lock.Unlock()
}

// dialbackoff is a struct used to avoid over-dialing the same, dead peers.
// Whenever we totally time out on a peer (all three attempts), we add it
// to dialbackoff. Then, whenever goroutines would _wait_ (dialsync), they
// check dialbackoff. If the peer is there, they don't wait and exit promptly
// with an error. (the single goroutine that is actually dialing continues to
// dial). If a dial is successful, the peer is removed from backoff.
// Example:
//
//  for {
//  	if ok, wait := dialsync.Lock(p); !ok {
//  		if backoff.Backoff(p) {
//  			return errDialFailed
//  		}
//  		<-wait
//  		continue
//  	}
//  	defer dialsync.Unlock(p)
//  	c, err := actuallyDial(p)
//  	if err != nil {
//  		dialbackoff.AddBackoff(p)
//  		continue
//  	}
//  	dialbackoff.Clear(p)
//  }
//
type dialbackoff struct {
	entries map[peer.ID]struct{}
	lock    sync.RWMutex
}

func (db *dialbackoff) init() {
	if db.entries == nil {
		db.entries = make(map[peer.ID]struct{})
	}
}

// Backoff returns whether the client should back off from dialing
// peer p
func (db *dialbackoff) Backoff(p peer.ID) bool {
	db.lock.Lock()
	db.init()
	_, found := db.entries[p]
	db.lock.Unlock()
	return found
}

// AddBackoff lets other goroutines know that we've entered backoff with
// peer p, so dialers should not wait unnecessarily. We will still
// attempt to dial with one goroutine, in case we get through.
func (db *dialbackoff) AddBackoff(p peer.ID) {
	db.lock.Lock()
	db.init()
	db.entries[p] = struct{}{}
	db.lock.Unlock()
}

// Clear removes a backoff record. Clients should call this after a
// successful Dial.
func (db *dialbackoff) Clear(p peer.ID) {
	db.lock.Lock()
	db.init()
	delete(db.entries, p)
	db.lock.Unlock()
}

// Dial connects to a peer.
//
// The idea is that the client of Swarm does not need to know what network
// the connection will happen over. Swarm can use whichever it chooses.
// This allows us to use various transport protocols, do NAT traversal/relay,
// etc. to achieve connection.
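//
// A minimal sketch of how a caller might bound the whole dial with a deadline
// (hypothetical caller code, not part of this package):
//
//   ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
//   defer cancel()
//   c, err := s.Dial(ctx, p)
//   if err != nil {
//   	return err
//   }
//   // ... use the returned *Conn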
func (s *Swarm) Dial(ctx context.Context, p peer.ID) (*Conn, error) {
	if p == s.local {
		return nil, errors.New("Attempted connection to self!")
	}

	// this loop is here because dials take time, and we should not be dialing
	// the same peer concurrently (silly waste). Additionally, it's structured
	// to check s.ConnectionsToPeer(p) _first_, and _between_ attempts because we
	// may have received an incoming connection! if so, we no longer need to dial.
	//
	// During the dial attempts, we may be doing the dialing. if not, we wait.
	var err error
	var conn *Conn
	for i := 0; i < dialAttempts; i++ {
		// check if we already have an open connection first
		cs := s.ConnectionsToPeer(p)
		for _, conn = range cs {
			if conn != nil { // dump out the first one we find. (TODO pick better)
				return conn, nil
			}
		}

		// check if there's an ongoing dial to this peer
		if ok, wait := s.dsync.Lock(p); !ok {

			if s.backf.Backoff(p) {
				log.Debugf("backoff")
				return nil, fmt.Errorf("%s failed to dial %s, backing off.", s.local, p)
			}

			log.Debugf("waiting for ongoing dial")
			select {
			case <-wait: // wait for that dial to finish.
				continue // and see if it worked (loop), OR we got an incoming dial.
			case <-ctx.Done(): // or we may have to bail...
				return nil, ctx.Err()
			}
		}

		// ok, we have been charged to dial! let's do it.
		// if it succeeds, dial will add the conn to the swarm itself.
		log.Debugf("dial start")
		ctxT, cancel := context.WithTimeout(ctx, s.dialT)
		conn, err = s.dial(ctxT, p)
		cancel() // release the timeout context as soon as the dial attempt returns
		s.dsync.Unlock(p)
		log.Debugf("dial end %s", conn)
		if err != nil {
			s.backf.AddBackoff(p) // let others know to backoff

			continue // ok, we failed. try again. (if loop is done, our error is output)
		}
		s.backf.Clear(p) // okay, no longer need to backoff
		return conn, nil
	}
	if err == nil {
		err = fmt.Errorf("%s failed to dial %s after %d attempts", s.local, p, dialAttempts)
	}
	return nil, err
}

// dial is the actual swarm's dial logic, gated by Dial.
func (s *Swarm) dial(ctx context.Context, p peer.ID) (*Conn, error) {
	if p == s.local {
		return nil, errors.New("Attempted connection to self!")
	}

	sk := s.peers.PrivKey(s.local)
	if sk == nil {
		// may be fine for sk to be nil, just log a warning.
		log.Warning("Dial not given PrivateKey, so WILL NOT SECURE conn.")
	}

	// get our own addrs. try dialing out from our listener addresses (reusing ports)
	// Note that using our peerstore's addresses here is incorrect, as that would
	// include observed addresses. TODO: make peerstore's address book smarter.
	localAddrs := s.ListenAddresses()
	if len(localAddrs) == 0 {
		log.Debug("Dialing out with no local addresses.")
	}

	// get remote peer addrs
	remoteAddrs := s.peers.Addresses(p)
	// make sure we can use the addresses.
	remoteAddrs = addrutil.FilterUsableAddrs(remoteAddrs)
	// drop out any addrs that would just dial ourselves. use InterfaceListenAddresses
	// as that is a more authoritative view than localAddrs.
	ila, _ := s.InterfaceListenAddresses()
	remoteAddrs = addrutil.Subtract(remoteAddrs, ila)
	remoteAddrs = addrutil.Subtract(remoteAddrs, s.peers.Addresses(s.local))
	log.Debugf("%s swarm dialing %s -- remote:%s local:%s", s.local, p, remoteAddrs, s.ListenAddresses())
	if len(remoteAddrs) == 0 {
		return nil, errors.New("peer has no addresses")
	}

	// open connection to peer
	d := &conn.Dialer{
		Dialer: manet.Dialer{
			Dialer: net.Dialer{
				Timeout: s.dialT,
			},
		},
		LocalPeer:  s.local,
		LocalAddrs: localAddrs,
		PrivateKey: sk,
	}

	// try to get a connection to any addr
	connC, err := s.dialAddrs(ctx, d, p, remoteAddrs)
	if err != nil {
		return nil, err
	}

	// ok try to setup the new connection.
	swarmC, err := dialConnSetup(ctx, s, connC)
	if err != nil {
		log.Debug("Dial newConnSetup failed. disconnecting.")
		log.Event(ctx, "dialFailureDisconnect", lgbl.NetConn(connC), lgbl.Error(err))
		connC.Close() // close the connection. didn't work out :(
		return nil, err
	}

	log.Event(ctx, "dial", p)
	return swarmC, nil
}

func (s *Swarm) dialAddrs(ctx context.Context, d *conn.Dialer, p peer.ID, remoteAddrs []ma.Multiaddr) (conn.Conn, error) {

	// try to connect to one of the peer's known addresses.
	// we dial concurrently to each of the addresses, which:
	// * makes the process faster overall
	// * attempts to get the fastest connection available.
	// * mitigates the waste of trying bad addresses
	log.Debugf("%s swarm dialing %s %s", s.local, p, remoteAddrs)

	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // cancel work when we exit func

	foundConn := make(chan struct{})
	conns := make(chan conn.Conn, len(remoteAddrs))
	errs := make(chan error, len(remoteAddrs))
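	// foundConn is closed once a connection has been accepted, so the
	// remaining dial goroutines know to discard their results; conns and
	// errs are buffered so those goroutines never block on send.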

	//TODO: rate limiting just in case?
	for _, addr := range remoteAddrs {
		go func(addr ma.Multiaddr) {
			connC, err := s.dialAddr(ctx, d, p, addr)

			// check parent still wants our results
			select {
			case <-foundConn:
				if connC != nil {
					connC.Close()
				}
				return
			default:
			}

			if err != nil {
				errs <- err
			} else if connC == nil {
				errs <- fmt.Errorf("failed to dial %s %s", p, addr)
			} else {
				conns <- connC
			}
		}(addr)
	}

	err := fmt.Errorf("failed to dial %s", p)
	for i := 0; i < len(remoteAddrs); i++ {
		select {
		case err = <-errs:
			log.Info(err)
		case connC := <-conns:
			// take the first + return asap
			close(foundConn)
			return connC, nil
		}
	}
	return nil, err
}

func (s *Swarm) dialAddr(ctx context.Context, d *conn.Dialer, p peer.ID, addr ma.Multiaddr) (conn.Conn, error) {
	log.Debugf("%s swarm dialing %s %s", s.local, p, addr)

	connC, err := d.Dial(ctx, addr, p)
	if err != nil {
		return nil, fmt.Errorf("%s --> %s dial attempt failed: %s", s.local, p, err)
	}

	// if the connection is not to whom we thought it would be...
	remotep := connC.RemotePeer()
	if remotep != p {
		connC.Close()
		return nil, fmt.Errorf("misdial to %s through %s (got %s)", p, addr, remotep)
	}

	// if the connection is to ourselves...
	// this can happen TONS when Loopback addrs are advertised.
	// (this should be caught by the two checks above, but let's just make sure.)
	if remotep == s.local {
		connC.Close()
		return nil, fmt.Errorf("misdial to %s through %s (got self)", p, addr)
	}

	// success! we got one!
	return connC, nil
}

// dialConnSetup is the setup logic for a connection from the dial side. it
// needs to add the Conn to the StreamSwarm, then run newConnSetup
func dialConnSetup(ctx context.Context, s *Swarm, connC conn.Conn) (*Conn, error) {

	psC, err := s.swarm.AddConn(connC)
	if err != nil {
		// connC is closed by caller if we fail.
		return nil, fmt.Errorf("failed to add conn to ps.Swarm: %s", err)
	}

	// ok try to setup the new connection. (newConnSetup will add to group)
	swarmC, err := s.newConnSetup(ctx, psC)
	if err != nil {
		log.Debug("Dial newConnSetup failed. disconnecting.")
		log.Event(ctx, "dialFailureDisconnect", lgbl.NetConn(connC), lgbl.Error(err))
		psC.Close() // we need to make sure psC is Closed.
		return nil, err
	}

	return swarmC, err
}