8 changes: 4 additions & 4 deletions .github/workflows/benchmarks.yml
@@ -4,9 +4,9 @@ name: Benchmarks

on:
push:
branches: [ main ]
branches: [main]
pull_request:
branches: [ main ]
branches: [main]

permissions:
issues: write
@@ -39,13 +39,13 @@ jobs:
- name: Setup Golang
uses: actions/setup-go@v6
with:
go-version: '1.24'
go-version: "1.24"
cache: true

- name: Run Golang benchmarks
shell: bash
run: |
COUNT=$([[ "${{ github.event_name }}" == "pull_request" ]] && echo 1 || echo 5)
COUNT=$([[ "${{ github.event_name }}" == "pull_request" ]] && echo 10 || echo 15)
LOG_LEVEL=FATAL GOMAXPROCS=4 go test -run='$^' -count=$COUNT -bench=. -benchmem ./... | tee ${{ runner.temp }}/gotool.txt

# Add GitHub job summary for pull requests.
12 changes: 7 additions & 5 deletions cache/cache_test.go
@@ -129,19 +129,21 @@ func TestStress(t *testing.T) {
}

func BenchmarkBucketClean(b *testing.B) {
b.StopTimer()

cleaner := NewCleaner(0, nil)
c := NewCache[int](cleaner, nil)

for r := 0; r < b.N; r++ {
for i := 0; i < 1000; i++ {
for b.Loop() {
b.StopTimer()

for i := range 1000 {
c.Get(uint32(i), func() (int, int) { return i, 4 })
}

cleaner.markStale(cleaner.getSize())

b.StartTimer()

size := c.Cleanup()
b.StopTimer()
if size == 0 {
b.FailNow()
}
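
Note on the loop change above: the classic for i := 0; i < b.N; i++ form is replaced with testing.B.Loop, added in Go 1.24, and the per-iteration cache population is kept out of the measurement with StopTimer/StartTimer. A minimal sketch of that shape, using a hypothetical populate helper instead of the real cache; names here are illustrative, not from the repository:

package example

import "testing"

// populate stands in for the per-iteration setup in the real benchmark
// (filling the cache and marking entries stale); it is hypothetical.
func populate() []int {
	data := make([]int, 1000)
	for i := range data {
		data[i] = i
	}
	return data
}

func BenchmarkLoopShape(b *testing.B) {
	for b.Loop() { // Go 1.24+, replaces the for i := 0; i < b.N; i++ form
		b.StopTimer() // keep per-iteration setup out of the measured time
		data := populate()
		b.StartTimer()

		sum := 0
		for _, v := range data {
			sum += v
		}
		if sum == 0 { // use the result so the work cannot be discarded
			b.FailNow()
		}
	}
}
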
18 changes: 6 additions & 12 deletions frac/active_ids_test.go
@@ -5,9 +5,6 @@ import (
"testing"

"github.com/stretchr/testify/assert"
"go.uber.org/zap"

"github.com/ozontech/seq-db/logger"
)

func TestSeqListAppend(t *testing.T) {
@@ -36,13 +33,13 @@ func BenchmarkMutexListAppend(b *testing.B) {
gr := 2
mu := sync.Mutex{}
b.SetBytes(int64(gr * 8000000))
for n := 0; n < b.N; n++ {
for b.Loop() {
list := make([]uint64, 0)
wg := sync.WaitGroup{}
wg.Add(gr)
for i := 0; i < gr; i++ {
for range gr {
go func() {
for x := 0; x < 800000; x++ {
for x := range 800000 {
mu.Lock()
list = append(list, uint64(x))
mu.Unlock()
@@ -51,27 +48,24 @@
}()
}
wg.Wait()
logger.Info("list", zap.Int("total", len(list)))
}

}

func BenchmarkSeqListAppend(b *testing.B) {
gr := 2
b.SetBytes(int64(gr * 8000000))
for n := 0; n < b.N; n++ {
for b.Loop() {
list := NewIDs()
wg := sync.WaitGroup{}
wg.Add(gr)
for i := 0; i < gr; i++ {
for range gr {
go func() {
for x := 0; x < 800000; x++ {
for x := range 800000 {
list.Append(uint64(x))
}
wg.Done()
}()
}
wg.Wait()
logger.Info("list", zap.Uint32("total", list.Len()))
}
}
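
Besides switching to b.Loop and Go 1.22+ integer ranges (for range gr, for x := range 800000), these benchmarks drop the logger.Info calls from the timed body, so logging no longer contributes to the measured time. The throughput figure comes from b.SetBytes, which records bytes processed per iteration and makes go test report MB/s alongside ns/op. A minimal, self-contained sketch of SetBytes; the hash workload is illustrative, not from this repository:

package example

import (
	"crypto/sha256"
	"testing"
)

var sink [32]byte

// With SetBytes recording the bytes handled per iteration, go test -bench
// reports MB/s next to ns/op; that is the figure the list benchmarks track.
func BenchmarkHashThroughput(b *testing.B) {
	buf := make([]byte, 1<<20)
	b.SetBytes(int64(len(buf))) // bytes processed per measured iteration
	for b.Loop() {
		sink = sha256.Sum256(buf) // illustrative workload, kept alive via sink
	}
}
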
2 changes: 1 addition & 1 deletion frac/active_indexer_test.go
@@ -94,7 +94,7 @@ func BenchmarkIndexer(b *testing.B) {

processor := getTestProcessor()

for i := 0; i < b.N; i++ {
for b.Loop() {
b.StopTimer()
bulks := make([][]byte, 0, len(readers))
for _, readNext := range readers {
73 changes: 45 additions & 28 deletions frac/processor/aggregator_test.go
@@ -1,6 +1,7 @@
package processor

import (
"fmt"
"math"
"math/rand"
"reflect"
@@ -89,41 +90,57 @@ func Generate(n int) ([]uint32, uint32) {
}

func BenchmarkAggDeep(b *testing.B) {
v, _ := Generate(b.N)
src := node.NewSourcedNodeWrapper(node.NewStatic(v, false), 0)
iter := NewSourcedNodeIterator(src, nil, make([]uint32, 1), iteratorLimit{limit: 0, err: consts.ErrTooManyGroupTokens}, false)
n := NewSingleSourceCountAggregator(iter, provideExtractTimeFunc(nil, nil, 0))
vals, _ := Generate(b.N)
b.ResetTimer()
for _, v := range vals {
if err := n.Next(v); err != nil {
b.Fatal(err)
}
sizes := []int{1_000, 10_000, 1_000_000}

for _, s := range sizes {
b.Run(fmt.Sprintf("size=%d", s), func(b *testing.B) {
v, _ := Generate(s)
src := node.NewSourcedNodeWrapper(node.NewStatic(v, false), 0)
iter := NewSourcedNodeIterator(src, nil, make([]uint32, 1), iteratorLimit{limit: 0, err: consts.ErrTooManyGroupTokens}, false)
n := NewSingleSourceCountAggregator(iter, provideExtractTimeFunc(nil, nil, 0))
vals, _ := Generate(s)

for b.Loop() {
for _, v := range vals {
if err := n.Next(v); err != nil {
b.Fatal(err)
}
}
}
})
}
}

func BenchmarkAggWide(b *testing.B) {
v, _ := Generate(b.N)
sizes := []int{1_000, 10_000, 1_000_000}

for _, s := range sizes {
b.Run(fmt.Sprintf("size=%d", s), func(b *testing.B) {
v, _ := Generate(s)

factor := int(math.Sqrt(float64(s)))
wide := make([][]uint32, s/factor)
for i := range wide {
for range factor {
wide[i] = append(wide[i], v[rand.Intn(s)])
}
slices.Sort(wide[i])
}

factor := int(math.Sqrt(float64(b.N)))
wide := make([][]uint32, b.N/factor)
for i := range wide {
for range factor {
wide[i] = append(wide[i], v[rand.Intn(b.N)])
}
slices.Sort(wide[i])
}
source := node.BuildORTreeAgg(node.MakeStaticNodes(wide), false)

source := node.BuildORTreeAgg(node.MakeStaticNodes(wide), false)
iter := NewSourcedNodeIterator(source, nil, make([]uint32, len(wide)), iteratorLimit{limit: 0, err: consts.ErrTooManyGroupTokens}, false)
n := NewSingleSourceCountAggregator(iter, provideExtractTimeFunc(nil, nil, 0))
vals, _ := Generate(s)

iter := NewSourcedNodeIterator(source, nil, make([]uint32, len(wide)), iteratorLimit{limit: 0, err: consts.ErrTooManyGroupTokens}, false)
n := NewSingleSourceCountAggregator(iter, provideExtractTimeFunc(nil, nil, 0))
vals, _ := Generate(b.N)
b.ResetTimer()
for _, v := range vals {
if err := n.Next(v); err != nil {
b.Fatal(err)
}
for b.Loop() {
for _, v := range vals {
if err := n.Next(v); err != nil {
b.Fatal(err)
}
}
}
})
}
}
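
The rewrite above stops deriving the input from b.N. Previously Generate(b.N) tied the data size to however many iterations the framework chose, so per-op numbers were not comparable across runs; now each fixed size gets its own sub-benchmark via b.Run and b.Loop drives the iterations. A minimal sketch of that pattern, with a hypothetical process function standing in for the aggregator:

package example

import (
	"fmt"
	"testing"
)

// process stands in for the aggregator under test; it is a hypothetical workload.
func process(vals []uint32) uint64 {
	var sum uint64
	for _, v := range vals {
		sum += uint64(v)
	}
	return sum
}

func BenchmarkFixedSizes(b *testing.B) {
	for _, s := range []int{1_000, 10_000, 1_000_000} {
		b.Run(fmt.Sprintf("size=%d", s), func(b *testing.B) {
			// Build the input once per sub-benchmark, before the measured loop.
			vals := make([]uint32, s)
			for i := range vals {
				vals[i] = uint32(i)
			}
			for b.Loop() {
				if process(vals) == 0 {
					b.Fatal("unexpected zero result")
				}
			}
		})
	}
}
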

6 changes: 3 additions & 3 deletions indexer/processor_test.go
@@ -89,7 +89,7 @@ func BenchmarkParseESTime(b *testing.B) {
const toParseRFC3339 = "2024-04-19T18:04:25.999Z"

b.Run("es_stdlib", func(b *testing.B) {
for i := 0; i < b.N; i++ {
for b.Loop() {
_, err := time.Parse(consts.ESTimeFormat, toParse)
if err != nil {
b.Fatal(err)
@@ -98,7 +98,7 @@
})

b.Run("handwritten", func(b *testing.B) {
for i := 0; i < b.N; i++ {
for b.Loop() {
_, ok := parseESTime(toParse)
if !ok {
b.Fatal()
@@ -107,7 +107,7 @@
})

b.Run("rfc3339", func(b *testing.B) {
for i := 0; i < b.N; i++ {
for b.Loop() {
_, err := time.Parse(time.RFC3339, toParseRFC3339)
if err != nil {
b.Fatal(err)
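
These sub-benchmarks compare the handwritten parseESTime against time.Parse with the project's ES layout and with time.RFC3339. A rough sketch of the stdlib side only; consts.ESTimeFormat and parseESTime are not reproduced here, and the layout below is merely an assumption consistent with the sample timestamp:

package example

import (
	"testing"
	"time"
)

// Assumed millisecond layout matching the sample "2024-04-19T18:04:25.999Z";
// the repository's consts.ESTimeFormat may differ.
const esLikeLayout = "2006-01-02T15:04:05.000Z"

func BenchmarkParseTimestamp(b *testing.B) {
	const ts = "2024-04-19T18:04:25.999Z"

	b.Run("custom_layout", func(b *testing.B) {
		for b.Loop() {
			if _, err := time.Parse(esLikeLayout, ts); err != nil {
				b.Fatal(err)
			}
		}
	})

	b.Run("rfc3339", func(b *testing.B) {
		for b.Loop() {
			if _, err := time.Parse(time.RFC3339, ts); err != nil {
				b.Fatal(err)
			}
		}
	})
}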