Commit 1d06b0e5 authored by Steven Allen

chore: remove deprecated logging

parent 99c6798f
......@@ -382,7 +382,7 @@ func (bs *Bitswap) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []b
if from != "" {
for _, b := range wanted {
log.Event(ctx, "Bitswap.GetBlockRequest.End", b.Cid())
log.Debugw("Bitswap.GetBlockRequest.End", "cid", b.Cid())
}
}
......@@ -417,7 +417,7 @@ func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg
// Process blocks
err := bs.receiveBlocksFrom(ctx, p, iblocks, haves, dontHaves)
if err != nil {
log.Warningf("ReceiveMessage recvBlockFrom error: %s", err)
log.Warnf("ReceiveMessage recvBlockFrom error: %s", err)
return
}
}
......
......@@ -1092,12 +1092,12 @@ func getNextEnvelope(e *Engine, next envChan, t time.Duration) (envChan, *Envelo
select {
case env, ok := <-next: // blocks till next envelope ready
if !ok {
log.Warningf("got closed channel")
log.Warnf("got closed channel")
return nil, nil
}
return nil, env
case <-ctx.Done():
// log.Warningf("got timeout")
// log.Warnf("got timeout")
}
return next, nil
}
......
......@@ -77,7 +77,7 @@ func AsyncGetBlocks(ctx context.Context, sessctx context.Context, keys []cid.Cid
remaining := cid.NewSet()
promise := notif.Subscribe(ctx, keys...)
for _, k := range keys {
log.Event(ctx, "Bitswap.GetBlockRequest.Start", k)
log.Debugw("Bitswap.GetBlockRequest.Start", "cid", k)
remaining.Add(k)
}
......
......@@ -41,7 +41,7 @@ func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID {
for _, p := range peers {
counted += float64(prt.getPeerCount(p)) / float64(total)
if counted > rnd {
// log.Warningf(" chose %s from %s (%d) / %s (%d) with pivot %.2f",
// log.Warnf(" chose %s from %s (%d) / %s (%d) with pivot %.2f",
// lu.P(p), lu.P(peers[0]), prt.firstResponder[peers[0]], lu.P(peers[1]), prt.firstResponder[peers[1]], rnd)
return p
}
......@@ -51,7 +51,7 @@ func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID {
// math that doesn't quite cover the whole range of peers in the for loop
// so just choose the last peer.
index := len(peers) - 1
// log.Warningf(" chose last (indx %d) %s from %s (%d) / %s (%d) with pivot %.2f",
// log.Warnf(" chose last (indx %d) %s from %s (%d) / %s (%d) with pivot %.2f",
// index, lu.P(peers[index]), lu.P(peers[0]), prt.firstResponder[peers[0]], lu.P(peers[1]), prt.firstResponder[peers[1]], rnd)
return peers[index]
}
......
......@@ -210,13 +210,13 @@ func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontH
// // log.Infof("Ses%d<-%s: %d blocks, %d haves, %d dont haves\n",
// // s.id, from, len(interestedKs), len(wantedHaves), len(wantedDontHaves))
// for _, c := range interestedKs {
// log.Warningf("Ses%d %s<-%s: block %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c))
// log.Warnf("Ses%d %s<-%s: block %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c))
// }
// for _, c := range haves {
// log.Warningf("Ses%d %s<-%s: HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c))
// log.Warnf("Ses%d %s<-%s: HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c))
// }
// for _, c := range dontHaves {
// log.Warningf("Ses%d %s<-%s: DONT_HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c))
// log.Warnf("Ses%d %s<-%s: DONT_HAVE %s\n", s.id, lu.P(s.self), lu.P(from), lu.C(c))
// }
// }
......@@ -306,9 +306,9 @@ func (s *Session) run(ctx context.Context) {
func (s *Session) handleIdleTick(ctx context.Context) {
live := s.sw.PrepareBroadcast()
// log.Warningf("\n\n\n\n\nSes%d: broadcast %d keys\n\n\n\n\n", s.id, len(live))
// log.Warnf("\n\n\n\n\nSes%d: broadcast %d keys\n\n\n\n\n", s.id, len(live))
// log.Infof("Ses%d: broadcast %d keys\n", s.id, len(live))
log.Warningf("Ses%d: broadcast %d keys", s.id, len(live))
log.Warnf("Ses%d: broadcast %d keys", s.id, len(live))
// Broadcast a want-have for the live wants to everyone we're connected to
s.sprm.RecordPeerRequests(nil, live)
......@@ -387,7 +387,7 @@ func (s *Session) resetIdleTick() {
tickDelay = s.initialSearchDelay
} else {
avLat := s.latencyTrkr.averageLatency()
// log.Warningf("averageLatency %s", avLat)
// log.Warnf("averageLatency %s", avLat)
tickDelay = s.baseTickDelay + (3 * avLat)
}
tickDelay = tickDelay * time.Duration(1+s.consecutiveTicks)
......
......@@ -75,7 +75,7 @@ func (wm *WantManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid.Ci
// BroadcastWantHaves is called when want-haves should be broadcast to all
// connected peers (as part of session discovery)
func (wm *WantManager) BroadcastWantHaves(ctx context.Context, ses uint64, wantHaves []cid.Cid) {
// log.Warningf("BroadcastWantHaves session%d: %s", ses, wantHaves)
// log.Warnf("BroadcastWantHaves session%d: %s", ses, wantHaves)
// Record broadcast wants
wm.bcwl.Add(wantHaves, ses)
......
......@@ -135,7 +135,7 @@ func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg.
}
if err := s.SetWriteDeadline(deadline); err != nil {
log.Warningf("error setting deadline: %s", err)
log.Warnf("error setting deadline: %s", err)
}
// Older Bitswap versions use a slightly different wire format so we need
......@@ -157,7 +157,7 @@ func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg.
}
if err := s.SetWriteDeadline(time.Time{}); err != nil {
log.Warningf("error resetting deadline: %s", err)
log.Warnf("error resetting deadline: %s", err)
}
return nil
}
......
......@@ -8,7 +8,6 @@ import (
bsmsg "github.com/ipfs/go-bitswap/message"
pb "github.com/ipfs/go-bitswap/message/pb"
cid "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
process "github.com/jbenet/goprocess"
procctx "github.com/jbenet/goprocess/context"
)
......@@ -41,10 +40,10 @@ func (bs *Bitswap) startWorkers(ctx context.Context, px process.Process) {
}
func (bs *Bitswap) taskWorker(ctx context.Context, id int) {
idmap := logging.LoggableMap{"ID": id}
defer log.Debug("bitswap task worker shutting down...")
log := log.With("ID", id)
for {
log.Event(ctx, "Bitswap.TaskWorker.Loop", idmap)
log.Debug("Bitswap.TaskWorker.Loop")
select {
case nextEnvelope := <-bs.engine.Outbox():
select {
......@@ -57,13 +56,10 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) {
// TODO: Should only track *useful* messages in ledger
outgoing := bsmsg.New(false)
for _, block := range envelope.Message.Blocks() {
log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableF(func() map[string]interface{} {
return logging.LoggableMap{
"ID": id,
"Target": envelope.Peer.Pretty(),
"Block": block.Cid().String(),
}
}))
log.Debugw("Bitswap.TaskWorker.Work",
"Target", envelope.Peer,
"Block", block.Cid(),
)
outgoing.AddBlock(block)
}
for _, blockPresence := range envelope.Message.BlockPresences() {
......@@ -143,9 +139,9 @@ func (bs *Bitswap) provideWorker(px process.Process) {
// replace token when done
<-limit
}()
ev := logging.LoggableMap{"ID": wid}
defer log.EventBegin(ctx, "Bitswap.ProvideWorker.Work", ev, k).Done()
log.Debugw("Bitswap.ProvideWorker.Start", "ID", wid, "cid", k)
defer log.Debugw("Bitswap.ProvideWorker.End", "ID", wid, "cid", k)
ctx, cancel := context.WithTimeout(ctx, provideTimeout) // timeout ctx
defer cancel()
......@@ -158,8 +154,7 @@ func (bs *Bitswap) provideWorker(px process.Process) {
// worker spawner, reads from bs.provideKeys until it closes, spawning a
// _ratelimited_ number of workers to handle each key.
for wid := 2; ; wid++ {
ev := logging.LoggableMap{"ID": 1}
log.Event(ctx, "Bitswap.ProvideWorker.Loop", ev)
log.Debug("Bitswap.ProvideWorker.Loop")
select {
case <-px.Closing():
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment