# bench-lib has been moved to [the monorepo](https://github.com/NaturalCycles/js-libs/)

This repository is archived.

## @naturalcycles/bench-lib

> Benchmarking library, based on [Benchmark.js](https://github.com/bestiejs/benchmark.js/) and
> [Autocannon](https://github.com/mcollina/autocannon)

[npm](https://www.npmjs.com/package/@naturalcycles/bench-lib)
[bundle size](https://bundlephobia.com/result?p=@naturalcycles/bench-lib)
[code style: prettier](https://github.com/prettier/prettier)

# Why

Opinionated, high-level benchmarking library.

Lets you quickly benchmark your **functions**, either in a traditional benchmark.js run or while
being served from a bare Express.js http server.

See examples below!

# Show me the code 1 (benchmark.js)

```typescript
import { runBenchScript } from '@naturalcycles/bench-lib'

runBenchScript({
  fns: {
    noop: done => done.resolve(),
    random: done => {
      const _ = Math.random()
      done.resolve()
    },
    timeout: done => {
      setTimeout(() => done.resolve(), 0)
    },
    immediate: done => {
      setImmediate(() => done.resolve())
    },
    asyncAwait: async done => {
      await new Promise(resolve => resolve())
      done.resolve()
    },
  },
  runs: 2,
})
```

Will print:

```
noop x 241,077 ops/sec ±48.87% (31 runs sampled)
random x 280,523 ops/sec ±3.31% (33 runs sampled)
timeout x 768 ops/sec ±0.70% (79 runs sampled)
immediate x 59,573 ops/sec ±1.81% (76 runs sampled)
asyncAwait x 6,749,279 ops/sec ±0.99% (81 runs sampled)
Fastest is asyncAwait
```

Will produce [runBench.json](./demo/runBench.json) (numbers are ops/sec, or Hertz):

```json
{
  "noop": 239344,
  "random": 285384,
  "timeout": 775,
  "immediate": 60214,
  "asyncAwait": 6743787
}
```
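
The JSON output is just a map of function name to ops/sec, so it is easy to post-process. Below is a
minimal sketch (plain Node.js, not part of bench-lib; the file path is taken from the example above
and the `reportRelativeSpeed` helper is hypothetical) that prints each function's speed relative to
the fastest:

```typescript
import { promises as fs } from 'node:fs'

// Hypothetical helper, not part of bench-lib: load runBench.json (name => ops/sec)
// and print each function's throughput relative to the fastest one.
async function reportRelativeSpeed(path = './demo/runBench.json'): Promise<void> {
  const results: Record<string, number> = JSON.parse(await fs.readFile(path, 'utf8'))
  const fastest = Math.max(...Object.values(results))

  for (const [name, hz] of Object.entries(results).sort((a, b) => b[1] - a[1])) {
    console.log(`${name}: ${hz} ops/sec (${((hz / fastest) * 100).toFixed(1)}% of fastest)`)
  }
}

void reportRelativeSpeed()
```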

Will produce [runBench.svg](./demo/runBench.svg) plot:

![runBench](./demo/runBench.svg)

# Show me the code 2 (autocannon)

```typescript
import { runCannon, expressFunctionFactory } from '@naturalcycles/bench-lib'
import { _randomInt, pDelay } from '@naturalcycles/js-lib'

runCannon(
  {
    noop: expressFunctionFactory(() => 'yo'),
    async: expressFunctionFactory(async () => await pDelay(0, 'yo')),
    random: expressFunctionFactory(() => _randomInt(1, 10)),
  },
  {
    runs: 2,
    duration: 10,
  },
)
```

Will print:

```
┌─────────┬──────────┬─────────┬────────────┬───────────┬───────────┬───────────┬───────────────┬────────┬──────────┐
│ (index) │   name   │  rpsAvg │ latencyAvg │ latency50 │ latency90 │ latency99 │ throughputAvg │ errors │ timeouts │
├─────────┼──────────┼─────────┼────────────┼───────────┼───────────┼───────────┼───────────────┼────────┼──────────┤
│    0    │  'noop'  │ 31603.2 │    3.13    │     0     │     1     │     33    │      5.21     │    0   │     0    │
│    1    │  'async' │ 26502.4 │    3.77    │     0     │     16    │     41    │      4.37     │    0   │     0    │
│    2    │ 'random' │  32092  │    3.08    │     0     │     0     │     33    │      5.21     │    0   │     0    │
└─────────┴──────────┴─────────┴────────────┴───────────┴───────────┴───────────┴───────────────┴────────┴──────────┘
```

Will produce [runCannon.summary.json](./demo/runCannon.json):

```json
[
  {
    "name": "noop",
    "rpsAvg": 31603.2,
    "latencyAvg": 3.13,
    "latency50": 0,
    "latency90": 1,
    "latency99": 33,
    "throughputAvg": 5.21,
    "errors": 0,
    "timeouts": 0
  },
  ...
]
```
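
For reference, each summary row appears to have the following shape (a hand-written sketch inferred
from the output above, not an exported bench-lib type; the units are my reading of Autocannon's
reporting and may differ):

```typescript
// Inferred from the table/JSON above; NOT an exported bench-lib type.
interface CannonSummaryRow {
  name: string // profile name, e.g. 'noop'
  rpsAvg: number // average requests per second
  latencyAvg: number // average latency (assumed milliseconds)
  latency50: number // 50th percentile latency (assumed milliseconds)
  latency90: number // 90th percentile latency (assumed milliseconds)
  latency99: number // 99th percentile latency (assumed milliseconds)
  throughputAvg: number // average throughput (assumed MB/sec)
  errors: number // failed requests
  timeouts: number // timed-out requests
}
```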

Will produce plots:

# How

The fundamental difference between Benchmark.js and Autocannon is that the former executes
**serially** (one-after-another), while the latter fires requests **concurrently** (with concurrency
as high as 100 by default). As a result, a "no-op async function" runs at ~700 times/second when
executed sequentially (each Promise has to await its "tick"), but at ~32K requests/second when
served from an http server and hit concurrently.
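
To see that difference in isolation, here is a standalone sketch (plain Node.js, not using
bench-lib; all names are made up for illustration): the same async operation is measured once
sequentially and once with 100 calls in flight, mimicking Benchmark.js-style vs Autocannon-style
execution.

```typescript
// Standalone illustration, not part of bench-lib.
const op = (): Promise<void> => new Promise<void>(resolve => setTimeout(resolve, 0))

async function measure(label: string, fn: () => Promise<void>): Promise<void> {
  const started = Date.now()
  await fn()
  console.log(`${label}: ${Date.now() - started} ms`)
}

const N = 1000
const CONCURRENCY = 100

void (async () => {
  // Serial, Benchmark.js-style: each op (and its event-loop "tick") must finish
  // before the next one starts, so the waits add up.
  await measure('serial', async () => {
    for (let i = 0; i < N; i++) await op()
  })

  // Concurrent, Autocannon-style: 100 ops are in flight at once,
  // so the per-op waits overlap instead of adding up.
  await measure('concurrent', async () => {
    for (let i = 0; i < N; i += CONCURRENCY) {
      await Promise.all(Array.from({ length: CONCURRENCY }, op))
    }
  })
})()
```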