path: root/pkg/buffer/view.go
author    Ting-Yu Wang <anivia@google.com>    2021-05-13 13:54:04 -0700
committer gVisor bot <gvisor-bot@google.com> 2021-05-13 13:56:16 -0700
commit    84f04cc858644e9748a82f33b834a84c8b0fc934 (patch)
tree      011d6915a666ea978a7b5efb7397757cef3370e0 /pkg/buffer/view.go
parent    baa0888f114c586ea490d49a23c3d828fd739b85 (diff)
Migrate PacketBuffer to use pkg/buffer
Benchmark iperf3:
               Before  After
native->runsc  5.14    5.01   (Gbps)
runsc->native  4.15    4.07   (Gbps)

It did introduce overhead, mainly at the bridge between pkg/buffer and
VectorisedView, the ExtractVV method. Once endpoints start migrating away
from VV, this overhead will be gone.

Updates #2404

PiperOrigin-RevId: 373651666
Diffstat (limited to 'pkg/buffer/view.go')
-rw-r--r--  pkg/buffer/view.go  179
1 file changed, 177 insertions(+), 2 deletions(-)
diff --git a/pkg/buffer/view.go b/pkg/buffer/view.go
index 00652d675..7bcfcd543 100644
--- a/pkg/buffer/view.go
+++ b/pkg/buffer/view.go
@@ -19,6 +19,9 @@ import (
"io"
)
+// Buffer is an alias for View.
+type Buffer = View
+
// View is a non-linear buffer.
//
// All methods are thread compatible.
@@ -39,6 +42,51 @@ func (v *View) TrimFront(count int64) {
}
}
+// Remove deletes data at the specified location in v. It returns false if the
+// specified range does not fully reside in v.
+func (v *View) Remove(offset, length int) bool {
+ if offset < 0 || length < 0 {
+ return false
+ }
+ tgt := Range{begin: offset, end: offset + length}
+ if tgt.Len() != tgt.Intersect(Range{end: int(v.size)}).Len() {
+ return false
+ }
+
+ // Scan through each buffer and remove intersections.
+ var curr Range
+ for buf := v.data.Front(); buf != nil; {
+ origLen := buf.ReadSize()
+ curr.end = curr.begin + origLen
+
+ if x := curr.Intersect(tgt); x.Len() > 0 {
+ if !buf.Remove(x.Offset(-curr.begin)) {
+ panic("buf.Remove() failed")
+ }
+ if buf.ReadSize() == 0 {
+ // buf is fully removed; unlink it from the list.
+ oldBuf := buf
+ buf = buf.Next()
+ v.data.Remove(oldBuf)
+ v.pool.put(oldBuf)
+ } else {
+ // Only part of buf intersects; move on to the next one.
+ buf = buf.Next()
+ }
+ v.size -= int64(x.Len())
+ } else {
+ // This buffer is not in range; move on to the next one.
+ buf = buf.Next()
+ }
+
+ curr.begin += origLen
+ if curr.begin >= tgt.end {
+ break
+ }
+ }
+ return true
+}
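To illustrate, a minimal in-package sketch of Remove (assuming, as the rest of this file suggests, that the zero-value View is usable and that Size reports the total length):

	var v View
	v.Append([]byte("hello, world"))
	ok := v.Remove(5, 2) // delete ", " at offset 5
	// ok == true; v.Size() == 10 and the contents now read "helloworld".
	ok = v.Remove(8, 5) // [8, 13) extends past v.Size() == 10
	// ok == false; v is unchanged.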
+
// ReadAt implements io.ReaderAt.ReadAt.
func (v *View) ReadAt(p []byte, offset int64) (int, error) {
var (
@@ -81,7 +129,6 @@ func (v *View) advanceRead(count int64) {
oldBuf := buf
buf = buf.Next() // Iterate.
v.data.Remove(oldBuf)
- oldBuf.Reset()
v.pool.put(oldBuf)
// Update counts.
@@ -118,7 +165,6 @@ func (v *View) Truncate(length int64) {
// Drop the buffer completely; see above.
v.data.Remove(buf)
- buf.Reset()
v.pool.put(buf)
v.size -= sz
}
@@ -224,6 +270,78 @@ func (v *View) Append(data []byte) {
}
}
+// AppendOwned takes ownership of data and appends it to v.
+func (v *View) AppendOwned(data []byte) {
+ if len(data) > 0 {
+ buf := v.pool.getNoInit()
+ buf.initWithData(data)
+ v.data.PushBack(buf)
+ v.size += int64(len(data))
+ }
+}
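Unlike Append, which copies the input into pooled buffers, AppendOwned stores the caller's slice directly, so later writes to that slice alias v. A short sketch of the distinction (the values are illustrative):

	p := []byte("payload")
	var v View
	v.AppendOwned(p) // no copy: v references p directly.
	p[0] = 'P'       // the change is visible through v.
	// With v.Append(p), v would instead hold a private copy.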
+
+// PullUp makes the specified range contiguous and returns the backing memory.
+func (v *View) PullUp(offset, length int) ([]byte, bool) {
+ if length == 0 {
+ return nil, true
+ }
+ tgt := Range{begin: offset, end: offset + length}
+ if tgt.Intersect(Range{end: int(v.size)}).Len() != length {
+ return nil, false
+ }
+
+ curr := Range{}
+ buf := v.data.Front()
+ for ; buf != nil; buf = buf.Next() {
+ origLen := buf.ReadSize()
+ curr.end = curr.begin + origLen
+
+ if x := curr.Intersect(tgt); x.Len() == tgt.Len() {
+ // buf covers the whole requested target range.
+ sub := x.Offset(-curr.begin)
+ return buf.ReadSlice()[sub.begin:sub.end], true
+ } else if x.Len() > 0 {
+ // buf is the first buffer that intersects the target; merging starts here.
+ break
+ }
+
+ curr.begin += origLen
+ }
+
+ // Calculate the total merged length.
+ totLen := 0
+ for n := buf; n != nil; n = n.Next() {
+ totLen += n.ReadSize()
+ if curr.begin+totLen >= tgt.end {
+ break
+ }
+ }
+
+ // Merge the buffers.
+ data := make([]byte, totLen)
+ off := 0
+ for n := buf; n != nil && off < totLen; {
+ copy(data[off:], n.ReadSlice())
+ off += n.ReadSize()
+
+ // Remove buffers except for the first one, which will be reused.
+ if n == buf {
+ n = n.Next()
+ } else {
+ old := n
+ n = n.Next()
+ v.data.Remove(old)
+ v.pool.put(old)
+ }
+ }
+
+ // Update the first buffer with merged data.
+ buf.initWithData(data)
+
+ r := tgt.Offset(-curr.begin)
+ return buf.data[r.begin:r.end], true
+}
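A sketch of the intended use: obtaining a contiguous protocol header from a view whose bytes may span several pooled buffers (the 20-byte length is an arbitrary example):

	hdr, ok := v.PullUp(0, 20)
	if !ok {
		return // v holds fewer than 20 bytes; v is not modified.
	}
	// hdr aliases v's (possibly merged) backing storage, so writes to
	// hdr are visible in v.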
+
// Flatten returns a flattened copy of this data.
//
// This method should not be used in any performance-sensitive paths. It may
@@ -267,6 +385,27 @@ func (v *View) Apply(fn func([]byte)) {
}
}
+// SubApply applies fn to a given range of data in v. Any part of the range
+// outside of v is ignored.
+func (v *View) SubApply(offset, length int, fn func([]byte)) {
+ for buf := v.data.Front(); length > 0 && buf != nil; buf = buf.Next() {
+ d := buf.ReadSlice()
+ if offset >= len(d) {
+ offset -= len(d)
+ continue
+ }
+ if offset > 0 {
+ d = d[offset:]
+ offset = 0
+ }
+ if length < len(d) {
+ d = d[:length]
+ }
+ fn(d)
+ length -= len(d)
+ }
+}
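For example, a caller could checksum a byte range in place without flattening the view (the offsets are illustrative; fn may be invoked once per underlying buffer chunk):

	var sum uint32
	v.SubApply(14, 20, func(b []byte) {
		for _, x := range b {
			sum += uint32(x)
		}
	})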
+
// Merge merges the provided View with this one.
//
// The other view will be appended to v, and other will be empty after this
@@ -389,3 +528,39 @@ func (v *View) ReadToWriter(w io.Writer, count int64) (int64, error) {
}
return done, err
}
+
+// A Range specifies a range of bytes within a buffer.
+type Range struct {
+ begin int
+ end int
+}
+
+// Intersect returns the intersection of x and y.
+func (x Range) Intersect(y Range) Range {
+ if x.begin < y.begin {
+ x.begin = y.begin
+ }
+ if x.end > y.end {
+ x.end = y.end
+ }
+ if x.begin >= x.end {
+ return Range{}
+ }
+ return x
+}
+
+// Offset returns x offset by off.
+func (x Range) Offset(off int) Range {
+ x.begin += off
+ x.end += off
+ return x
+}
+
+// Len returns the length of x.
+func (x Range) Len() int {
+ l := x.end - x.begin
+ if l < 0 {
+ l = 0
+ }
+ return l
+}
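A few worked cases of the Range helpers (in-package, since begin and end are unexported):

	x := Range{begin: 2, end: 8}
	y := Range{begin: 5, end: 12}
	_ = x.Intersect(y)                // Range{begin: 5, end: 8}
	_ = y.Offset(-5)                  // Range{begin: 0, end: 7}
	_ = Range{begin: 9, end: 4}.Len() // 0: Len clamps inverted ranges.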