Commit b9dda137 authored by tavit ohanian

Merge branch 'port-2021-06-25'

parents aecfdf26 a4e67474
Pipeline #424 passed with stages in 16 seconds
# File managed by web3-bot. DO NOT EDIT.
# See https://github.com/protocol/.github/ for details.
# Automatically merge pull requests opened by web3-bot, as soon as (and only if) all tests pass.
# This reduces the friction associated with updating with our workflows.
on: [ pull_request ]
name: Automerge
jobs:
automerge-check:
if: github.event.pull_request.user.login == 'web3-bot'
runs-on: ubuntu-latest
outputs:
status: ${{ steps.should-automerge.outputs.status }}
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Check if we should automerge
id: should-automerge
run: |
for commit in $(git rev-list --first-parent origin/${{ github.event.pull_request.base.ref }}..${{ github.event.pull_request.head.sha }}); do
committer=$(git show --format=$'%ce' -s $commit)
echo "Committer: $committer"
if [[ "$committer" != "web3-bot@users.noreply.github.com" ]]; then
echo "Commit $commit wasn't committed by web3-bot, but by $committer."
echo "::set-output name=status::false"
exit
fi
done
echo "::set-output name=status::true"
automerge:
needs: automerge-check
runs-on: ubuntu-latest
if: ${{ needs.automerge-check.outputs.status == 'true' }}
steps:
- name: Wait on tests
uses: lewagon/wait-on-check-action@bafe56a6863672c681c3cf671f5e10b20abf2eaa # v0.2
with:
ref: ${{ github.event.pull_request.head.sha }}
repo-token: ${{ secrets.GITHUB_TOKEN }}
wait-interval: 10
running-workflow-name: 'automerge' # the name of this job
- name: Merge PR
uses: pascalgn/automerge-action@741c311a47881be9625932b0a0de1b0937aab1ae # v0.13.1
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
MERGE_LABELS: ""
MERGE_METHOD: "squash"
MERGE_DELETE_BRANCH: true
# File managed by web3-bot. DO NOT EDIT.
# See https://github.com/protocol/.github/ for details.
on: [push, pull_request]
name: Go Checks
jobs:
unit:
runs-on: ubuntu-latest
name: All
steps:
- uses: actions/checkout@v2
with:
submodules: recursive
- uses: actions/setup-go@v2
with:
go-version: "1.16.x"
- name: Install staticcheck
run: go install honnef.co/go/tools/cmd/staticcheck@434f5f3816b358fe468fa83dcba62d794e7fe04b # 2021.1 (v0.2.0)
- name: Check that go.mod is tidy
uses: protocol/multiple-go-modules@v1.0
with:
run: |
go mod tidy
if [[ -n $(git ls-files --other --exclude-standard --directory -- go.sum) ]]; then
echo "go.sum was added by go mod tidy"
exit 1
fi
git diff --exit-code -- go.sum go.mod
- name: gofmt
if: ${{ success() || failure() }} # run this step even if the previous one failed
run: |
out=$(gofmt -s -l .)
if [[ -n "$out" ]]; then
echo $out | awk '{print "::error file=" $0 ",line=0,col=0::File is not gofmt-ed."}'
exit 1
fi
- name: go vet
if: ${{ success() || failure() }} # run this step even if the previous one failed
uses: protocol/multiple-go-modules@v1.0
with:
run: go vet ./...
- name: staticcheck
if: ${{ success() || failure() }} # run this step even if the previous one failed
uses: protocol/multiple-go-modules@v1.0
with:
run: |
set -o pipefail
staticcheck ./... | sed -e 's@\(.*\)\.go@./\1.go@g'
# File managed by web3-bot. DO NOT EDIT.
# See https://github.com/protocol/.github/ for details.
on: [push, pull_request]
name: Go Test
jobs:
unit:
strategy:
fail-fast: false
matrix:
os: [ "ubuntu", "windows", "macos" ]
go: [ "1.15.x", "1.16.x" ]
runs-on: ${{ matrix.os }}-latest
name: ${{ matrix.os }} (go ${{ matrix.go }})
steps:
- uses: actions/checkout@v2
with:
submodules: recursive
- uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go }}
- name: Go information
run: |
go version
go env
- name: Run tests
uses: protocol/multiple-go-modules@v1.0
with:
run: go test -v -coverprofile coverage.txt ./...
- name: Run tests (32 bit)
if: ${{ matrix.os != 'macos' }} # can't run 32 bit tests on OSX.
uses: protocol/multiple-go-modules@v1.0
env:
GOARCH: 386
with:
run: go test -v ./...
- name: Run tests with race detector
if: ${{ matrix.os == 'ubuntu' }} # speed things up. Windows and OSX VMs are slow
uses: protocol/multiple-go-modules@v1.0
with:
run: go test -v -race ./...
- name: Upload coverage to Codecov
uses: codecov/codecov-action@a1ed4b322b4b38cb846afb5a0ebfa17086917d27 # v1.5.0
with:
file: coverage.txt
env_vars: OS=${{ matrix.os }}, GO=${{ matrix.go }}
stages:
- build
- test
variables:
BUILD_DIR: "/tmp/$CI_CONCURRENT_PROJECT_ID"
before_script:
- mkdir -p $BUILD_DIR/src
- cd $BUILD_DIR/src
- if [ -d $CI_PROJECT_DIR ]
- then
- echo "soft link $CI_PROJECT_DIR exists"
- else
- echo "creating soft link $CI_PROJECT_DIR"
- ln -s $CI_PROJECT_DIR
- fi
- cd $CI_PROJECT_DIR
build:
stage: build
tags:
- testing
script:
- echo $CI_JOB_STAGE
- go build
test:
stage: test
tags:
- testing
script:
- echo $CI_JOB_STAGE
- go test -cover
coverage: '/coverage: \d+.\d+% of statements/'
The MIT License (MIT)
Copyright (c) 2014 Juan Batiz-Benet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
### Applies to buffer.go and buffer_test.go ###
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
go-buffer-pool
==================
dms3 go-buffer-pool
[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](https://protocol.ai)
[![](https://img.shields.io/badge/project-libp2p-yellow.svg?style=flat-square)](https://libp2p.io/)
[![](https://img.shields.io/badge/freenode-%23libp2p-yellow.svg?style=flat-square)](https://webchat.freenode.net/?channels=%23libp2p)
[![codecov](https://codecov.io/gh/libp2p/go-buffer-pool/branch/master/graph/badge.svg)](https://codecov.io/gh/libp2p/go-buffer-pool)
[![Travis CI](https://travis-ci.org/libp2p/go-buffer-pool.svg?branch=master)](https://travis-ci.org/libp2p/go-buffer-pool)
[![Discourse posts](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io)
> A variable-size buffer pool for Go.
## Table of Contents
- [Use Case](#use-case)
- [Advantages over GC](#advantages-over-gc)
- [Disadvantages over GC:](#disadvantages-over-gc)
- [Contribute](#contribute)
- [License](#license)
## Use Case
Use this when you need to repeatedly allocate and free a bunch of temporary buffers of approximately the same size.
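A minimal sketch of that pattern, using the package-level `Get`/`Put` helpers and the import path shown in the package documentation (the buffer contents here are purely illustrative):

```go
package main

import (
	"fmt"

	pool "gitlab.dms3.io/p2p/go-buffer-pool"
)

func main() {
	// Borrow a 1 KiB buffer from the global pool; the length is exactly what
	// was requested, while the capacity is rounded up to a power of 2.
	buf := pool.Get(1024)
	defer pool.Put(buf) // return it when done; contents are NOT zeroed

	n := copy(buf, "temporary work")
	fmt.Println(len(buf), cap(buf), string(buf[:n]))
}
```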
### Advantages over GC
* Reduces Memory Usage:
* We don't have to wait for a GC to run before we can reuse memory. This is essential if you're repeatedly allocating large short-lived buffers.
* Reduces CPU usage:
* It takes some load off of the GC (due to buffer reuse).
* We don't have to zero buffers (fewer wasteful memory writes).
### Disadvantages over GC:
* Can leak memory contents. Unlike the Go GC, we *don't* zero memory.
* All buffers have a capacity of a power of 2. This is fine if you either (a) actually need buffers with this size or (b) expect these buffers to be temporary.
* Requires that buffers be returned explicitly. This can lead to race conditions and memory corruption if the buffer is released while it's still in use.
## Contribute
PRs are welcome!
Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification.
## License
MIT © Protocol Labs
BSD © The Go Authors
---
The last gx published version of this module was: 0.1.3: QmQDvJoB6aJWN3sjr3xsgXqKCXf4jU5zdMXpDMsBkYVNqa
// This is a derivative work of Go's bytes.Buffer implementation.
//
// Originally copyright 2009 The Go Authors. All rights reserved.
//
// Modifications copyright 2018 Steven Allen. All rights reserved.
//
// Use of this source code is governed by both a BSD-style and an MIT-style
// license that can be found in the LICENSE_BSD and LICENSE files.
package pool
import (
"io"
)
// Buffer is a buffer like bytes.Buffer that:
//
// 1. Uses a buffer pool.
// 2. Frees memory on read.
//
// If you only have a few buffers and read/write at a steady rate, *don't* use
// this package, it'll be slower.
//
// However:
//
// 1. If you frequently create/destroy buffers, this implementation will be
// significantly nicer to the allocator.
// 2. If you have many buffers with bursty traffic, this implementation will use
// significantly less memory.
type Buffer struct {
// Pool is the buffer pool to use. If nil, this Buffer will use the
// global buffer pool.
Pool *BufferPool
buf []byte
rOff int
// Preallocated array for small reads/writes.
// This is *really* important for performance and only costs 8 words.
bootstrap [64]byte
}
// NewBuffer constructs a new buffer initialized to `buf`.
// Unlike `bytes.Buffer`, we *copy* the buffer rather than reusing it (to ensure
// that we *only* use buffers from the pool).
func NewBuffer(buf []byte) *Buffer {
b := new(Buffer)
if len(buf) > 0 {
b.buf = b.getBuf(len(buf))
copy(b.buf, buf)
}
return b
}
// NewBufferString is identical to NewBuffer *except* that it allows one to
// initialize the buffer from a string (without having to allocate an
// intermediate byte slice).
func NewBufferString(buf string) *Buffer {
b := new(Buffer)
if len(buf) > 0 {
b.buf = b.getBuf(len(buf))
copy(b.buf, buf)
}
return b
}
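// grow grows the internal buffer to guarantee space for n more bytes, sliding
// unread data to the front or reallocating from the pool as needed, and
// returns the offset at which the next write should begin.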
func (b *Buffer) grow(n int) int {
wOff := len(b.buf)
bCap := cap(b.buf)
if bCap >= wOff+n {
b.buf = b.buf[:wOff+n]
return wOff
}
bSize := b.Len()
minCap := 2*bSize + n
// Slide if cap >= minCap.
// Reallocate otherwise.
if bCap >= minCap {
copy(b.buf, b.buf[b.rOff:])
} else {
// Needs new buffer.
newBuf := b.getBuf(minCap)
copy(newBuf, b.buf[b.rOff:])
b.returnBuf()
b.buf = newBuf
}
b.rOff = 0
b.buf = b.buf[:bSize+n]
return bSize
}
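// getPool returns the buffer pool in use, falling back to the global pool
// when none is configured.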
func (b *Buffer) getPool() *BufferPool {
if b.Pool == nil {
return GlobalPool
}
return b.Pool
}
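// returnBuf releases the current internal buffer back to the pool (unless it
// is backed by the bootstrap array) and clears it.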
func (b *Buffer) returnBuf() {
if cap(b.buf) > len(b.bootstrap) {
b.getPool().Put(b.buf)
}
b.buf = nil
}
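// getBuf returns a buffer of length n, using the preallocated bootstrap array
// for small sizes and the pool otherwise.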
func (b *Buffer) getBuf(n int) []byte {
if n <= len(b.bootstrap) {
return b.bootstrap[:n]
}
return b.getPool().Get(n)
}
// Len returns the number of bytes that can be read from this buffer.
func (b *Buffer) Len() int {
return len(b.buf) - b.rOff
}
// Cap returns the current capacity of the buffer.
//
// Note: Buffer *may* re-allocate when writing (or growing by) `n` bytes even if
// `Cap() >= Len() + n`, to avoid excessive copying.
func (b *Buffer) Cap() int {
return cap(b.buf)
}
// Bytes returns the slice of bytes currently buffered in the Buffer.
//
// The buffer returned by Bytes is valid until the next call to grow, truncate,
// read, or write. Really, just don't touch the Buffer until you're done with
// the return value of this function.
func (b *Buffer) Bytes() []byte {
return b.buf[b.rOff:]
}
// String returns the string representation of the buffer.
//
// It returns `<nil>` if the buffer is a nil pointer.
func (b *Buffer) String() string {
if b == nil {
return "<nil>"
}
return string(b.buf[b.rOff:])
}
// WriteString writes a string to the buffer.
//
// This function is identical to Write except that it allows one to write a
// string directly without allocating an intermediate byte slice.
func (b *Buffer) WriteString(buf string) (int, error) {
wOff := b.grow(len(buf))
return copy(b.buf[wOff:], buf), nil
}
// Truncate truncates the Buffer.
//
// Panics if `n > b.Len()`.
//
// This function may free memory by shrinking the internal buffer.
func (b *Buffer) Truncate(n int) {
if n < 0 || n > b.Len() {
panic("truncation out of range")
}
b.buf = b.buf[:b.rOff+n]
b.shrink()
}
// Reset is equivalent to Truncate(0).
func (b *Buffer) Reset() {
b.returnBuf()
b.rOff = 0
}
// ReadByte reads a single byte from the Buffer.
func (b *Buffer) ReadByte() (byte, error) {
if b.rOff >= len(b.buf) {
return 0, io.EOF
}
c := b.buf[b.rOff]
b.rOff++
return c, nil
}
// WriteByte writes a single byte to the Buffer.
func (b *Buffer) WriteByte(c byte) error {
wOff := b.grow(1)
b.buf[wOff] = c
return nil
}
// Grow grows the internal buffer such that `n` bytes can be written without
// reallocating.
func (b *Buffer) Grow(n int) {
wOff := b.grow(n)
b.buf = b.buf[:wOff]
}
// Next is an alternative to `Read` that returns a byte slice instead of taking
// one.
//
// The returned byte slice is valid until the next read, write, grow, or
// truncate.
func (b *Buffer) Next(n int) []byte {
m := b.Len()
if m < n {
n = m
}
data := b.buf[b.rOff : b.rOff+n]
b.rOff += n
return data
}
// Write writes the byte slice to the buffer.
func (b *Buffer) Write(buf []byte) (int, error) {
wOff := b.grow(len(buf))
return copy(b.buf[wOff:], buf), nil
}
// WriteTo copies from the buffer into the given writer until the buffer is
// empty.
func (b *Buffer) WriteTo(w io.Writer) (int64, error) {
if b.rOff < len(b.buf) {
n, err := w.Write(b.buf[b.rOff:])
b.rOff += n
if b.rOff > len(b.buf) {
panic("invalid write count")
}
b.shrink()
return int64(n), err
}
return 0, nil
}
// MinRead is the minimum slice size passed to a Read call by
// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
// what is required to hold the contents of r, ReadFrom will not grow the
// underlying buffer.
const MinRead = 512
// ReadFrom reads from the given reader into the buffer.
func (b *Buffer) ReadFrom(r io.Reader) (int64, error) {
n := int64(0)
for {
wOff := b.grow(MinRead)
// Use *entire* buffer.
b.buf = b.buf[:cap(b.buf)]
read, err := r.Read(b.buf[wOff:])
b.buf = b.buf[:wOff+read]
n += int64(read)
switch err {
case nil:
case io.EOF:
err = nil
fallthrough
default:
b.shrink()
return n, err
}
}
}
// Read reads at most `len(buf)` bytes from the internal buffer into the given
// buffer.
func (b *Buffer) Read(buf []byte) (int, error) {
if len(buf) == 0 {
return 0, nil
}
if b.rOff >= len(b.buf) {
return 0, io.EOF
}
n := copy(buf, b.buf[b.rOff:])
b.rOff += n
b.shrink()
return n, nil
}
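// shrink releases memory after reads: when the buffer is empty it is returned
// to the pool, and when less than 1/8 of the capacity is in use the contents
// are copied into a smaller buffer.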
func (b *Buffer) shrink() {
c := b.Cap()
// Either nil or bootstrap.
if c <= len(b.bootstrap) {
return
}
l := b.Len()
if l == 0 {
// Shortcut if empty.
b.returnBuf()
b.rOff = 0
} else if l*8 < c {
// Only shrink when capacity > 8x length. Avoids shrinking too aggressively.
newBuf := b.getBuf(l)
copy(newBuf, b.buf[b.rOff:])
b.returnBuf()
b.rOff = 0
b.buf = newBuf[:l]
}
}
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Modified by stebalien, 2018
package pool
import (
"bytes"
"math/rand"
"runtime"
"testing"
)
const N = 10000 // make this bigger for a larger (and slower) test
var data string // test data for write tests
var testBytes []byte // test data; same as data but as a slice.
func init() {
testBytes = make([]byte, N)
for i := 0; i < N; i++ {
testBytes[i] = 'a' + byte(i%26)
}
data = string(testBytes)
}
// Verify that contents of buf match the string s.
func check(t *testing.T, testname string, buf *Buffer, s string) {
bytes := buf.Bytes()
str := buf.String()
if buf.Len() != len(bytes) {
t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes))
}
if buf.Len() != len(str) {
t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str))
}
if buf.Len() != len(s) {
t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s))
}
if string(bytes) != s {
t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s)
}
}
// Fill buf through n writes of string fus.
// The initial contents of buf corresponds to the string s;
// the result is the final contents of buf returned as a string.
func fillString(t *testing.T, testname string, buf *Buffer, s string, n int, fus string) string {
check(t, testname+" (fill 1)", buf, s)
for ; n > 0; n-- {
m, err := buf.WriteString(fus)
if m != len(fus) {
t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fus))
}
if err != nil {
t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
}
s += fus
check(t, testname+" (fill 4)", buf, s)
}
return s
}
// Fill buf through n writes of byte slice fub.
// The initial contents of buf corresponds to the string s;
// the result is the final contents of buf returned as a string.
func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string {
check(t, testname+" (fill 1)", buf, s)
for ; n > 0; n-- {
m, err := buf.Write(fub)
if m != len(fub) {
t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub))
}
if err != nil {
t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err)
}
s += string(fub)
check(t, testname+" (fill 4)", buf, s)
}
return s
}
func TestNewBuffer(t *testing.T) {
buf := NewBuffer(testBytes)
check(t, "NewBuffer", buf, data)
}
func TestNewBufferString(t *testing.T) {
buf := NewBufferString(data)
check(t, "NewBufferString", buf, data)
}
// Empty buf through repeated reads into fub.
// The initial contents of buf corresponds to the string s.
func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) {
check(t, testname+" (empty 1)", buf, s)
for {
n, err := buf.Read(fub)
if n == 0 {
break
}
if err != nil {
t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err)
}
s = s[n:]
check(t, testname+" (empty 3)", buf, s)
}
check(t, testname+" (empty 4)", buf, "")
}
func TestBasicOperations(t *testing.T) {
var buf Buffer
for i := 0; i < 5; i++ {
check(t, "TestBasicOperations (1)", &buf, "")
buf.Reset()
check(t, "TestBasicOperations (2)", &buf, "")
buf.Truncate(0)
check(t, "TestBasicOperations (3)", &buf, "")
n, err := buf.Write([]byte(data[0:1]))
if n != 1 {
t.Errorf("wrote 1 byte, but n == %d", n)
}
if err != nil {
t.Errorf("err should always be nil, but err == %s", err)
}
check(t, "TestBasicOperations (4)", &buf, "a")
buf.WriteByte(data[1])
check(t, "TestBasicOperations (5)", &buf, "ab")
n, err = buf.Write([]byte(data[2:26]))
if err != nil {
t.Fatal(err)
}
if n != 24 {
t.Errorf("wrote 25 bytes, but n == %d", n)
}
check(t, "TestBasicOperations (6)", &buf, string(data[0:26]))
buf.Truncate(26)
check(t, "TestBasicOperations (7)", &buf, string(data[0:26]))
buf.Truncate(20)
check(t, "TestBasicOperations (8)", &buf, string(data[0:20]))
empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5))
empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100))
buf.WriteByte(data[1])
c, err := buf.ReadByte()
if err != nil {
t.Error("ReadByte unexpected eof")
}
if c != data[1] {
t.Errorf("ReadByte wrong value c=%v", c)
}
if _, err = buf.ReadByte(); err == nil {
t.Error("ReadByte unexpected not eof")
}
}
}
func TestLargeStringWrites(t *testing.T) {
var buf Buffer
limit := 30
if testing.Short() {
limit = 9
}
for i := 3; i < limit; i += 3 {
s := fillString(t, "TestLargeWrites (1)", &buf, "", 5, data)
empty(t, "TestLargeStringWrites (2)", &buf, s, make([]byte, len(data)/i))
}
check(t, "TestLargeStringWrites (3)", &buf, "")
}
func TestLargeByteWrites(t *testing.T) {
var buf Buffer
limit := 30
if testing.Short() {
limit = 9
}
for i := 3; i < limit; i += 3 {
s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes)
empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i))
}
check(t, "TestLargeByteWrites (3)", &buf, "")
}
func TestLargeStringReads(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillString(t, "TestLargeReads (1)", &buf, "", 5, data[0:len(data)/i])
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
}
check(t, "TestLargeStringReads (3)", &buf, "")
}
func TestLargeByteReads(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
}
check(t, "TestLargeByteReads (3)", &buf, "")
}
func TestMixedReadsAndWrites(t *testing.T) {
var buf Buffer
s := ""
for i := 0; i < 50; i++ {
wlen := rand.Intn(len(data))
if i%2 == 0 {
s = fillString(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, data[0:wlen])
} else {
s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen])
}
rlen := rand.Intn(len(data))
fub := make([]byte, rlen)
n, _ := buf.Read(fub)
s = s[n:]
}
empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len()))
}
func TestNil(t *testing.T) {
var b *Buffer
if b.String() != "<nil>" {
t.Errorf("expected <nil>; got %q", b.String())
}
}
func TestReadFrom(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
var b Buffer
b.ReadFrom(&buf)
empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data)))
}
}
func TestWriteTo(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
var b Buffer
buf.WriteTo(&b)
empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data)))
}
}
func TestNext(t *testing.T) {
b := []byte{0, 1, 2, 3, 4}
tmp := make([]byte, 5)
for i := 0; i <= 5; i++ {
for j := i; j <= 5; j++ {
for k := 0; k <= 6; k++ {
// 0 <= i <= j <= 5; 0 <= k <= 6
// Check that if we start with a buffer
// of length j at offset i and ask for
// Next(k), we get the right bytes.
buf := NewBuffer(b[0:j])
n, _ := buf.Read(tmp[0:i])
if n != i {
t.Fatalf("Read %d returned %d", i, n)
}
bb := buf.Next(k)
want := k
if want > j-i {
want = j - i
}
if len(bb) != want {
t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb))
}
for l, v := range bb {
if v != byte(l+i) {
t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i)
}
}
}
}
}
}
func TestGrow(t *testing.T) {
x := []byte{'x'}
y := []byte{'y'}
tmp := make([]byte, 72)
for _, startLen := range []int{0, 100, 1000, 10000, 100000} {
xBytes := bytes.Repeat(x, startLen)
for _, growLen := range []int{0, 100, 1000, 10000, 100000} {
buf := NewBuffer(xBytes)
// If we read, this affects the buffer's read offset, which is good to test.
readBytes, _ := buf.Read(tmp)
buf.Grow(growLen)
yBytes := bytes.Repeat(y, growLen)
// Check no allocation occurs in write, as long as we're single-threaded.
var m1, m2 runtime.MemStats
runtime.ReadMemStats(&m1)
buf.Write(yBytes)
runtime.ReadMemStats(&m2)
if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs {
t.Errorf("allocation occurred during write")
}
// Check that buffer has correct data.
if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) {
t.Errorf("bad initial data at %d %d", startLen, growLen)
}
if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) {
t.Errorf("bad written data at %d %d", startLen, growLen)
}
}
}
}
// Was a bug: used to give EOF reading empty slice at EOF.
func TestReadEmptyAtEOF(t *testing.T) {
b := new(Buffer)
slice := make([]byte, 0)
n, err := b.Read(slice)
if err != nil {
t.Errorf("read error: %v", err)
}
if n != 0 {
t.Errorf("wrong count; got %d want 0", n)
}
}
// Tests that we occasionally compact. Issue 5154.
func TestBufferGrowth(t *testing.T) {
var b Buffer
buf := make([]byte, 1024)
b.Write(buf[0:1])
var cap0 int
for i := 0; i < 5<<10; i++ {
b.Write(buf)
b.Read(buf)
if i == 0 {
cap0 = b.Cap()
}
}
cap1 := b.Cap()
// (*Buffer).grow allows for 2x capacity slop before sliding,
// so set our error threshold at 3x.
if cap1 > cap0*3 {
t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0)
}
}
func BenchmarkWriteByte(b *testing.B) {
const n = 4 << 10
b.SetBytes(n)
buf := NewBuffer(make([]byte, n))
for i := 0; i < b.N; i++ {
buf.Reset()
for i := 0; i < n; i++ {
buf.WriteByte('x')
}
}
}
// From Issue 5154.
func BenchmarkBufferNotEmptyWriteRead(b *testing.B) {
buf := make([]byte, 1024)
for i := 0; i < b.N; i++ {
var b Buffer
b.Write(buf[0:1])
for i := 0; i < 5<<10; i++ {
b.Write(buf)
b.Read(buf)
}
}
}
// Check that we don't compact too often. From Issue 5154.
func BenchmarkBufferFullSmallReads(b *testing.B) {
buf := make([]byte, 1024)
for i := 0; i < b.N; i++ {
var b Buffer
b.Write(buf)
for b.Len()+20 < b.Cap() {
b.Write(buf[:10])
}
for i := 0; i < 5<<10; i++ {
b.Read(buf[:1])
b.Write(buf[:1])
}
}
}
coverage:
range: "50...100"
comment: off
// Package pool provides a sync.Pool equivalent that buckets incoming
// requests to one of 32 sub-pools, one for each power of 2 from 0 to 31.
//
// import (pool "gitlab.dms3.io/p2p/go-buffer-pool")
// var p pool.BufferPool
//
// small := make([]byte, 1024)
// large := make([]byte, 4194304)
// p.Put(small)
// p.Put(large)
//
// small2 := p.Get(1024)
// large2 := p.Get(4194304)
// fmt.Println("small2 len:", len(small2))
// fmt.Println("large2 len:", len(large2))
//
// // Output:
// // small2 len: 1024
// // large2 len: 4194304
//
package pool
import (
"math"
"math/bits"
"sync"
)
// GlobalPool is a static Pool for reusing byte slices of various sizes.
var GlobalPool = new(BufferPool)
// MaxLength is the maximum length of an element that can be added to the Pool.
const MaxLength = math.MaxInt32
// BufferPool is a pool to handle cases of reusing elements of varying sizes. It
// maintains 32 internal pools, one for each power of 2 from 0 to 31.
//
// You should generally just call the package level Get and Put methods or use
// the GlobalPool BufferPool instead of constructing your own.
//
// A BufferPool MUST NOT be copied after first use.
type BufferPool struct {
pools [32]sync.Pool // a list of singlePools
ptrs sync.Pool
}
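// bufp wraps a byte slice so that a pointer, rather than the slice header
// itself, is stored in the sync.Pool, which avoids an extra allocation on Put.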
type bufp struct {
buf []byte
}
// Get retrieves a buffer of the appropriate length from the buffer pool or
// allocates a new one. Get may choose to ignore the pool and treat it as empty.
// Callers should not assume any relation between values passed to Put and the
// values returned by Get.
//
// If no suitable buffer exists in the pool, Get creates one.
func (p *BufferPool) Get(length int) []byte {
if length == 0 {
return nil
}
if length > MaxLength {
return make([]byte, length)
}
idx := nextLogBase2(uint32(length))
if ptr := p.pools[idx].Get(); ptr != nil {
bp := ptr.(*bufp)
buf := bp.buf[:uint32(length)]
bp.buf = nil
p.ptrs.Put(ptr)
return buf
}
return make([]byte, 1<<idx)[:uint32(length)]
}
// Put adds buf to the pool.
func (p *BufferPool) Put(buf []byte) {
capacity := cap(buf)
if capacity == 0 || capacity > MaxLength {
return // drop it
}
idx := prevLogBase2(uint32(capacity))
var bp *bufp
if ptr := p.ptrs.Get(); ptr != nil {
bp = ptr.(*bufp)
} else {
bp = new(bufp)
}
bp.buf = buf
p.pools[idx].Put(bp)
}
// Get retrieves a buffer of the appropriate length from the global buffer pool
// (or allocates a new one).
func Get(length int) []byte {
return GlobalPool.Get(length)
}
// Put returns a buffer to the global buffer pool.
func Put(slice []byte) {
GlobalPool.Put(slice)
}
// Log of base two, round up (for v > 0).
func nextLogBase2(v uint32) uint32 {
return uint32(bits.Len32(v - 1))
}
// Log of base two, round down (for v > 0)
func prevLogBase2(num uint32) uint32 {
next := nextLogBase2(num)
if num == (1 << uint32(next)) {
return next
}
return next - 1
}
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Pool is no-op under race detector, so all these tests do not work.
// +build !race
package pool
import (
"bytes"
"fmt"
"math/rand"
"runtime"
"runtime/debug"
"testing"
)
func TestAllocations(t *testing.T) {
var m1, m2 runtime.MemStats
runtime.ReadMemStats(&m1)
runtime.GC()
for i := 0; i < 10000; i++ {
b := Get(1010)
Put(b)
}
runtime.GC()
runtime.ReadMemStats(&m2)
frees := m2.Frees - m1.Frees
if frees > 1000 {
t.Fatalf("expected less than 100 frees after GC, got %d", frees)
}
}
func TestRange(t *testing.T) {
min := nextLogBase2(1)
max := nextLogBase2(uint32(MaxLength))
if int(max) != len(GlobalPool.pools)-1 {
t.Errorf("expected %d pools, found %d", max, len(GlobalPool.pools))
}
if min != 0 {
t.Errorf("unused min pool")
}
}
func TestPool(t *testing.T) {
// disable GC so we can control when it happens.
defer debug.SetGCPercent(debug.SetGCPercent(-1))
var p BufferPool
a := make([]byte, 21)
a[0] = 1
b := make([]byte, 2050)
b[0] = 2
p.Put(a)
p.Put(b)
if g := p.Get(16); &g[0] != &a[0] {
t.Fatalf("got [%d,...]; want [1,...]", g[0])
}
if g := p.Get(2048); &g[0] != &b[0] {
t.Fatalf("got [%d,...]; want [2,...]", g[0])
}
if g := p.Get(16); cap(g) != 16 || !bytes.Equal(g[:16], make([]byte, 16)) {
t.Fatalf("got existing slice; want new slice")
}
if g := p.Get(2048); cap(g) != 2048 || !bytes.Equal(g[:2048], make([]byte, 2048)) {
t.Fatalf("got existing slice; want new slice")
}
if g := p.Get(1); cap(g) != 1 || !bytes.Equal(g[:1], make([]byte, 1)) {
t.Fatalf("got existing slice; want new slice")
}
d := make([]byte, 1023)
d[0] = 3
p.Put(d)
if g := p.Get(1024); cap(g) != 1024 || !bytes.Equal(g, make([]byte, 1024)) {
t.Fatalf("got existing slice; want new slice")
}
if g := p.Get(512); cap(g) != 1023 || g[0] != 3 {
t.Fatalf("got [%d,...]; want [3,...]", g[0])
}
p.Put(a)
debug.SetGCPercent(100) // to allow following GC to actually run
runtime.GC()
// For some reason, you need to run GC twice on go 1.16 if you want it to reliably work.
runtime.GC()
if g := p.Get(10); &g[0] == &a[0] {
t.Fatalf("got a; want new slice after GC")
}
}
func TestPoolStressByteSlicePool(t *testing.T) {
var p BufferPool
const P = 10
chs := 10
maxSize := 1 << 16
N := int(1e4)
if testing.Short() {
N /= 100
}
done := make(chan bool)
errs := make(chan error)
for i := 0; i < P; i++ {
go func() {
ch := make(chan []byte, chs+1)
for i := 0; i < chs; i++ {
j := rand.Int() % maxSize
ch <- p.Get(j)
}
for j := 0; j < N; j++ {
r := 0
for i := 0; i < chs; i++ {
v := <-ch
p.Put(v)
r = rand.Int() % maxSize
v = p.Get(r)
if len(v) < r {
errs <- fmt.Errorf("expect len(v) >= %d, got %d", r, len(v))
}
ch <- v
}
if r%1000 == 0 {
runtime.GC()
}
}
done <- true
}()
}
for i := 0; i < P; {
select {
case <-done:
i++
case err := <-errs:
t.Error(err)
}
}
}
func BenchmarkPool(b *testing.B) {
var p BufferPool
b.RunParallel(func(pb *testing.PB) {
i := 7
for pb.Next() {
if i > 1<<20 {
i = 7
} else {
i = i << 1
}
b := p.Get(i)
b[0] = byte(i)
p.Put(b)
}
})
}
func BenchmarkAlloc(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
i := 7
for pb.Next() {
if i > 1<<20 {
i = 7
} else {
i = i << 1
}
b := make([]byte, i)
b[1] = byte(i)
}
})
}
func BenchmarkPoolOverlflow(b *testing.B) {
var p BufferPool
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
bufs := make([][]byte, 2100)
for pow := uint32(0); pow < 21; pow++ {
for i := 0; i < 100; i++ {
bufs = append(bufs, p.Get(1<<pow))
}
}
for _, b := range bufs {
p.Put(b)
}
}
})
}
func ExampleGet() {
buf := Get(100)
fmt.Println("length", len(buf))
fmt.Println("capacity", cap(buf))
Put(buf)
// Output:
// length 100
// capacity 128
}
package pool
import (
"bufio"
"io"
"sync"
)
const WriterBufferSize = 4096
var bufioWriterPool = sync.Pool{
New: func() interface{} {
return bufio.NewWriterSize(nil, WriterBufferSize)
},
}
// Writer is a buffered writer that returns its internal buffer in a pool when
// not in use.
type Writer struct {
W io.Writer
bufw *bufio.Writer
}
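// ensureBuffer lazily fetches a bufio.Writer from the pool and points it at
// the underlying writer.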
func (w *Writer) ensureBuffer() {
if w.bufw == nil {
w.bufw = bufioWriterPool.Get().(*bufio.Writer)
w.bufw.Reset(w.W)
}
}
// Write writes the given byte slice to the underlying connection.
//
// Note: Write won't return the write buffer to the pool even if it ends up
// being empty after the write. You must call Flush() to do that.
func (w *Writer) Write(b []byte) (int, error) {
if w.bufw == nil {
if len(b) >= WriterBufferSize {
return w.W.Write(b)
}
w.bufw = bufioWriterPool.Get().(*bufio.Writer)
w.bufw.Reset(w.W)
}
return w.bufw.Write(b)
}
// Size returns the size of the underlying buffer.
func (w *Writer) Size() int {
return WriterBufferSize
}
// Available returns the amount of buffer space available.
func (w *Writer) Available() int {
if w.bufw != nil {
return w.bufw.Available()
}
return WriterBufferSize
}
// Buffered returns the amount of data buffered.
func (w *Writer) Buffered() int {
if w.bufw != nil {
return w.bufw.Buffered()
}
return 0
}
// WriteByte writes a single byte.
func (w *Writer) WriteByte(b byte) error {
w.ensureBuffer()
return w.bufw.WriteByte(b)
}
// WriteRune writes a single rune, returning the number of bytes written.
func (w *Writer) WriteRune(r rune) (int, error) {
w.ensureBuffer()
return w.bufw.WriteRune(r)
}
// WriteString writes a string, returning the number of bytes written.
func (w *Writer) WriteString(s string) (int, error) {
w.ensureBuffer()
return w.bufw.WriteString(s)
}
// Flush flushes the write buffer, if any, and returns it to the pool.
func (w *Writer) Flush() error {
if w.bufw == nil {
return nil
}
if err := w.bufw.Flush(); err != nil {
return err
}
w.bufw.Reset(nil)
bufioWriterPool.Put(w.bufw)
w.bufw = nil
return nil
}
// Close flushes the underlying writer and closes it if it implements the
// io.Closer interface.
//
// Note: Close() closes the writer even if Flush() fails to avoid leaking system
// resources. If you want to make sure Flush() succeeds, call it first.
func (w *Writer) Close() error {
var (
ferr, cerr error
)
ferr = w.Flush()
// always close even if flush fails.
if closer, ok := w.W.(io.Closer); ok {
cerr = closer.Close()
}
if ferr != nil {
return ferr
}
return cerr
}
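// A minimal usage sketch (hypothetical io.Writer `conn`; error handling elided):
//
//	bw := &Writer{W: conn}
//	bw.WriteString("hello")
//	bw.Flush() // flush and return the internal bufio.Writer to the pool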
package pool
import (
"bytes"
"testing"
)
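// checkSize verifies that Size, Buffered, and Available stay consistent
// (Size == Buffered + Available).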
func checkSize(t *testing.T, w *Writer) {
if w.Size()-w.Buffered() != w.Available() {
t.Fatalf("size (%d), buffered (%d), available (%d) mismatch", w.Size(), w.Buffered(), w.Available())
}
}
func TestWriter(t *testing.T) {
var b bytes.Buffer
w := Writer{W: &b}
n, err := w.Write([]byte("foobar"))
checkSize(t, &w)
if err != nil || n != 6 {
t.Fatalf("write failed: %d, %s", n, err)
}
if b.Len() != 0 {
t.Fatal("expected the buffer to be empty")
}
if w.Buffered() != 6 {
t.Fatalf("expected 6 bytes to be buffered, got %d", w.Buffered())
}
checkSize(t, &w)
if err := w.Flush(); err != nil {
t.Fatal(err)
}
checkSize(t, &w)
if err := w.Flush(); err != nil {
t.Fatal(err)
}
checkSize(t, &w)
if b.String() != "foobar" {
t.Fatal("expected to have written foobar")
}
b.Reset()
buf := make([]byte, WriterBufferSize)
n, err = w.Write(buf)
if n != WriterBufferSize || err != nil {
t.Fatalf("write failed: %d, %s", n, err)
}
checkSize(t, &w)
if b.Len() != WriterBufferSize {
t.Fatal("large write should have gone through directly")
}
if err := w.Flush(); err != nil {
t.Fatal(err)
}
checkSize(t, &w)
b.Reset()
if err := w.WriteByte(1); err != nil {
t.Fatal(err)
}
if w.Buffered() != 1 {
t.Fatalf("expected 1 byte to be buffered, got %d", w.Buffered())
}
if n, err := w.WriteRune('1'); err != nil || n != 1 {
t.Fatal(err)
}
if w.Buffered() != 2 {
t.Fatalf("expected 2 bytes to be buffered, got %d", w.Buffered())
}
checkSize(t, &w)
if n, err := w.WriteString("foobar"); err != nil || n != 6 {
t.Fatal(err)
}
if w.Buffered() != 8 {
t.Fatalf("expected 8 bytes to be buffered, got %d", w.Buffered())
}
checkSize(t, &w)
if b.Len() != 0 {
t.Fatal("write should have been buffered")
}
n, err = w.Write(buf)
if n != WriterBufferSize || err != nil {
t.Fatalf("write failed: %d, %s", n, err)
}
if b.Len() != WriterBufferSize || b.Bytes()[0] != 1 || b.String()[1:8] != "1foobar" {
t.Fatalf("failed to flush properly: len:%d, prefix:%#v", b.Len(), b.Bytes()[:10])
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
}