Commit 1d06b0e5 authored by Steven Allen

chore: remove deprecated logging

parent 99c6798f
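
go-log is deprecating its tracing/event API (`log.Event`, `log.EventBegin`, `logging.LoggableMap`), and this commit moves every call site onto plain structured logging (`Debugw`, `Warnf`, `With`). A minimal before/after sketch of the pattern (the `main` wrapper and the ID value are illustrative, not taken from the diff):

```go
package main

import (
	logging "github.com/ipfs/go-log"
)

// Package-level logger, as go-bitswap declares; "bitswap" is the subsystem name.
var log = logging.Logger("bitswap")

func main() {
	_ = logging.SetLogLevel("bitswap", "debug") // make Debug* output visible
	id := 7                                     // illustrative worker ID

	// Deprecated style, removed by this commit:
	//   log.Event(ctx, "Bitswap.TaskWorker.Loop", logging.LoggableMap{"ID": id})
	//   log.Warningf("got closed channel")

	// Replacement: key-value pairs on the zap-backed sugared logger.
	log.Debugw("Bitswap.TaskWorker.Loop", "ID", id)
	log.Warnf("got closed channel")

	// Fields can also be bound once and reused, as taskWorker now does.
	wlog := log.With("ID", id)
	wlog.Debug("Bitswap.TaskWorker.Loop")
}
```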
@@ -382,7 +382,7 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []b
 	if from != "" {
 		for _, b := range wanted {
-			log.Event(ctx, "Bitswap.GetBlockRequest.End", b.Cid())
+			log.Debugw("Bitswap.GetBlockRequest.End", "cid", b.Cid())
 		}
 	}
@@ -417,7 +417,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg
 	// Process blocks
 	err := bs.receiveBlocksFrom(ctx, p, iblocks, haves, dontHaves)
 	if err != nil {
-		log.Warningf("ReceiveMessage recvBlockFrom error: %s", err)
+		log.Warnf("ReceiveMessage recvBlockFrom error: %s", err)
 		return
 	}
 }
...
@@ -1092,12 +1092,12 @@ func getNextEnvelope(e *Engine, next envChan, t time.Duration) (envChan, *Envelo
 	select {
 	case env, ok := <-next: // blocks till next envelope ready
 		if !ok {
-			log.Warningf("got closed channel")
+			log.Warnf("got closed channel")
 			return nil, nil
 		}
 		return nil, env
 	case <-ctx.Done():
-		// log.Warningf("got timeout")
+		// log.Warnf("got timeout")
 	}
 	return next, nil
 }
...
@@ -77,7 +77,7 @@ func AsyncGetBlocks(ctx context.Context, sessctx context.Context, keys []cid.Cid
 	remaining := cid.NewSet()
 	promise := notif.Subscribe(ctx, keys...)
 	for _, k := range keys {
-		log.Event(ctx, "Bitswap.GetBlockRequest.Start", k)
+		log.Debugw("Bitswap.GetBlockRequest.Start", "cid", k)
 		remaining.Add(k)
 	}
...
@@ -41,7 +41,7 @@ func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID {
 	for _, p := range peers {
 		counted += float64(prt.getPeerCount(p)) / float64(total)
 		if counted > rnd {
-			// log.Warningf(" chose %s from %s (%d) / %s (%d) with pivot %.2f",
+			// log.Warnf(" chose %s from %s (%d) / %s (%d) with pivot %.2f",
 			// 	lu.P(p), lu.P(peers[0]), prt.firstResponder[peers[0]], lu.P(peers[1]), prt.firstResponder[peers[1]], rnd)
 			return p
 		}
@@ -51,7 +51,7 @@ func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID {
 	// math that doesn't quite cover the whole range of peers in the for loop
 	// so just choose the last peer.
 	index := len(peers) - 1
-	// log.Warningf(" chose last (indx %d) %s from %s (%d) / %s (%d) with pivot %.2f",
+	// log.Warnf(" chose last (indx %d) %s from %s (%d) / %s (%d) with pivot %.2f",
 	// 	index, lu.P(peers[index]), lu.P(peers[0]), prt.firstResponder[peers[0]], lu.P(peers[1]), prt.firstResponder[peers[1]], rnd)
 	return peers[index]
 }
...
@@ -210,13 +210,13 @@ func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontH
 	// // log.Infof("Ses%d<-%s: %d blocks, %d haves, %d dont haves\n",
 	// // 	s.id, from, len(interestedKs), len(wantedHaves), len(wantedDontHaves))
 	// for _, c := range interestedKs {
-	// 	log.Warningf("Ses%d %s<-%s: block %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c))
+	// 	log.Warnf("Ses%d %s<-%s: block %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c))
 	// }
 	// for _, c := range haves {
-	// 	log.Warningf("Ses%d %s<-%s: HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c))
+	// 	log.Warnf("Ses%d %s<-%s: HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c))
 	// }
 	// for _, c := range dontHaves {
-	// 	log.Warningf("Ses%d %s<-%s: DONT_HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c))
+	// 	log.Warnf("Ses%d %s<-%s: DONT_HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c))
 	// }
 	// }
@@ -306,9 +306,9 @@ func (s *Session) run(ctx context.Context) {
 func (s *Session) handleIdleTick(ctx context.Context) {
 	live := s.sw.PrepareBroadcast()
-	// log.Warningf("\n\n\n\n\nSes%d: broadcast %d keys\n\n\n\n\n", s.id, len(live))
+	// log.Warnf("\n\n\n\n\nSes%d: broadcast %d keys\n\n\n\n\n", s.id, len(live))
 	// log.Infof("Ses%d: broadcast %d keys\n", s.id, len(live))
-	log.Warningf("Ses%d: broadcast %d keys", s.id, len(live))
+	log.Warnf("Ses%d: broadcast %d keys", s.id, len(live))
 	// Broadcast a want-have for the live wants to everyone we're connected to
 	s.sprm.RecordPeerRequests(nil, live)
@@ -387,7 +387,7 @@ func (s *Session) resetIdleTick() {
 		tickDelay = s.initialSearchDelay
 	} else {
 		avLat := s.latencyTrkr.averageLatency()
-		// log.Warningf("averageLatency %s", avLat)
+		// log.Warnf("averageLatency %s", avLat)
 		tickDelay = s.baseTickDelay + (3 * avLat)
 	}
 	tickDelay = tickDelay * time.Duration(1+s.consecutiveTicks)
...
@@ -75,7 +75,7 @@ func (wm *WantManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid.Ci
 // BroadcastWantHaves is called when want-haves should be broadcast to all
 // connected peers (as part of session discovery)
 func (wm *WantManager) BroadcastWantHaves(ctx context.Context, ses uint64, wantHaves []cid.Cid) {
-	// log.Warningf("BroadcastWantHaves session%d: %s", ses, wantHaves)
+	// log.Warnf("BroadcastWantHaves session%d: %s", ses, wantHaves)
 	// Record broadcast wants
 	wm.bcwl.Add(wantHaves, ses)
...
@@ -135,7 +135,7 @@ func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg.
 	}
 	if err := s.SetWriteDeadline(deadline); err != nil {
-		log.Warningf("error setting deadline: %s", err)
+		log.Warnf("error setting deadline: %s", err)
 	}
 	// Older Bitswap versions use a slightly different wire format so we need
@@ -157,7 +157,7 @@ func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg.
 	}
 	if err := s.SetWriteDeadline(time.Time{}); err != nil {
-		log.Warningf("error resetting deadline: %s", err)
+		log.Warnf("error resetting deadline: %s", err)
 	}
 	return nil
 }
...
@@ -8,7 +8,6 @@ import (
 	bsmsg "github.com/ipfs/go-bitswap/message"
 	pb "github.com/ipfs/go-bitswap/message/pb"
 	cid "github.com/ipfs/go-cid"
-	logging "github.com/ipfs/go-log"
 	process "github.com/jbenet/goprocess"
 	procctx "github.com/jbenet/goprocess/context"
 )
@@ -41,10 +40,10 @@ func (bs *Bitswap) startWorkers(ctx context.Context, px process.Process) {
 }
 
 func (bs *Bitswap) taskWorker(ctx context.Context, id int) {
-	idmap := logging.LoggableMap{"ID": id}
 	defer log.Debug("bitswap task worker shutting down...")
+	log := log.With("ID", id)
 	for {
-		log.Event(ctx, "Bitswap.TaskWorker.Loop", idmap)
+		log.Debug("Bitswap.TaskWorker.Loop")
 		select {
 		case nextEnvelope := <-bs.engine.Outbox():
 			select {
@@ -57,13 +56,10 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) {
 				// TODO: Should only track *useful* messages in ledger
 				outgoing := bsmsg.New(false)
 				for _, block := range envelope.Message.Blocks() {
-					log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableF(func() map[string]interface{} {
-						return logging.LoggableMap{
-							"ID":     id,
-							"Target": envelope.Peer.Pretty(),
-							"Block":  block.Cid().String(),
-						}
-					}))
+					log.Debugw("Bitswap.TaskWorker.Work",
+						"Target", envelope.Peer,
+						"Block", block.Cid(),
+					)
 					outgoing.AddBlock(block)
 				}
 				for _, blockPresence := range envelope.Message.BlockPresences() {
@@ -143,9 +139,9 @@ func (bs *Bitswap) provideWorker(px process.Process) {
 				// replace token when done
 				<-limit
 			}()
-			ev := logging.LoggableMap{"ID": wid}
-			defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, k).Done()
+			log.Debugw("Bitswap.ProvideWorker.Start", "ID", wid, "cid", k)
+			defer log.Debugw("Bitswap.ProvideWorker.End", "ID", wid, "cid", k)
 			ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx
 			defer cancel()
@@ -158,8 +154,7 @@ func (bs *Bitswap) provideWorker(px process.Process) {
 	// worker spawner, reads from bs.provideKeys until it closes, spawning a
 	// _ratelimited_ number of workers to handle each key.
 	for wid := 2; ; wid++ {
-		ev := logging.LoggableMap{"ID": 1}
-		log.Event(ctx, "Bitswap.ProvideWorker.Loop", ev)
+		log.Debug("Bitswap.ProvideWorker.Loop")
 		select {
 		case <-px.Closing():
...