Commit c002cc64 authored by Steven Allen's avatar Steven Allen

even fewer allocations

Also, drop the channel lock optimization. Uncontested locks are generally pretty
fast anyway (and we don't even use this code).
parent 90cce793
......@@ -38,9 +38,6 @@ func (s *Chan) ReadFromWithPool(r io.Reader, p *pool.BufferPool) {
// ReadFrom wraps the given io.Reader with a msgio.Reader, reads all
// messages, and sends them down the channel.
func (s *Chan) readFrom(mr Reader) {
// single reader, no need for Mutex
mr.(*reader).lock = new(nullLocker)
Loop:
for {
buf, err := mr.ReadMsg()
......@@ -74,8 +71,6 @@ func (s *Chan) WriteTo(w io.Writer) {
// if bottleneck, cycle around a set of buffers
mw := NewWriter(w)
// single writer, no need for Mutex
mw.(*writer).lock = new(nullLocker)
Loop:
for {
select {
......
......@@ -76,13 +76,13 @@ type ReadWriteCloser interface {
// writer is the underlying type implementing the msgio WriteCloser.
// The diff artifact showing both a sync.Locker and a sync.Mutex field is
// resolved to the post-change version: a value sync.Mutex, whose zero
// value is ready to use, avoiding a separate heap allocation per writer.
type writer struct {
	W io.Writer

	lock sync.Mutex
}
// NewWriter wraps an io.Writer with a msgio framed writer. The msgio.Writer
// will write the length prefix of every message written.
func NewWriter(w io.Writer) WriteCloser {
	// No explicit lock initialization: the writer's embedded sync.Mutex
	// is usable in its zero state. (The diff artifact showing both the
	// old and new return statements is resolved to the post-change line.)
	return &writer{W: w}
}
func (s *writer) Write(msg []byte) (int, error) {
......@@ -114,10 +114,10 @@ func (s *writer) Close() error {
// reader is the underlying type implementing the msgio ReadCloser.
// The diff artifact showing both the old ([]byte / sync.Locker) and new
// ([lengthSize]byte / sync.Mutex) field lines is resolved to the
// post-change version.
type reader struct {
	R io.Reader

	// lbuf is a fixed-size scratch buffer for the message length prefix;
	// using an array instead of a slice avoids one heap allocation per
	// reader.
	lbuf [lengthSize]byte
	next int
	pool *pool.BufferPool
	lock sync.Mutex
	max  int // the maximal message size (in bytes) this reader handles
}
......@@ -137,10 +137,8 @@ func NewReaderWithPool(r io.Reader, p *pool.BufferPool) ReadCloser {
}
return &reader{
R: r,
lbuf: make([]byte, lengthSize),
next: -1,
pool: p,
lock: new(sync.Mutex),
max: defaultMaxSize,
}
}
......@@ -156,7 +154,7 @@ func (s *reader) NextMsgLen() (int, error) {
func (s *reader) nextMsgLen() (int, error) {
if s.next == -1 {
n, err := ReadLen(s.R, s.lbuf)
n, err := ReadLen(s.R, s.lbuf[:])
if err != nil {
return 0, err
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment