mirror of https://github.com/ianstormtaylor/slate.git

Benchmark consistency && Allow users to select benches to run (#1765)

* Use slate rather than relative path

* Move benchmark to one dir

* Use slate-* instead of relative path

* Before and After Function

* Remove unnecessary cross-env

* Hard fix

* Lint the hard fix

* Reset memory in bench()

* Working on Benchmark Frameworks

* Rename to slate-dev-benchmark

* Add packages

* Fix prettier bug

* Benchmark framework is working

* Do not log in test

* max times test

* mute logger in test

* add hr time

* Better support for maxTime; add support for split runs to save memory

* Fix maxTries

* Add global.gc

* Global gc for each bench (see the sketch after this list)

* Better test interface

* Test max-time

* Test max-time done

* Add Benchmark among packages

* Starting to get benchmark running

* Pure Node lib

* Change babelrc for pure Node benchmark

* Moving Benchmarks

* Get benchmark and test running

* Get benchmark for slate-html-serializer

* add slate-react

* add slate/changes

* all benchmarks are converted

* Run benchmark by yarn

* Run benchmark with expose-gc

* Annotate Bench.js

* Do not bundle slate-dev-benchmark in rollup

* Add annotation

* Allow a config file to enable partial benchmark comparison

* Add config for compare

* support compare.js

* Do not re-allocate memory; a large heap skews the results

* Render with Decorations

* get active marks at range

* Fix bug in showing percents

* Fix percent showing bug

* chore: add more benches

* Better output of benchmark

* Fix linting

* Run decoration and normal rendering as different benchmark tests

* Fix deserialize benchmark

* README.md

* Fix Readme.md

* README.md

* block-spacing config

* safer user config loading

* use package.json to load package in test

* Consistent linting

* move components to parent directory

* Annotation styling in package

* margin line before multi-line block

* Fix naive bug

* Fix naive bug

* Fix a blank line

* only log user and hr

* Better name

* Better annotation for runBundleTasks

* Fix typo

* Better logger

* Move async to test

* Omit skip

* Only log the user space time

* Single line async sleep

* file name fix

* Fix annotation

* Better output of compare

* Remove get-characters(-at-range) benchmarks

* Restore emoji

* Capitalize types

* Remove compare to another area

* Add grep and config interface (sketched after this list)

* Linting files

* Linting benchmarks

* Linting benchmarks

* Update yarn.lock
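
A note on the gc commits referenced above: the benchmarks are launched with node --expose-gc so a full collection can be forced between benches, keeping one bench's garbage from inflating the next bench's numbers. global.gc only exists when that flag is passed, so the call has to be guarded. A minimal sketch, not the actual slate-dev-benchmark internals:

// Run as: node --expose-gc <benchmark entry> (the entry name varies by setup).
// global.gc is undefined without --expose-gc, so guard before calling.
function resetHeapBetweenBenches() {
  if (typeof global.gc === 'function') {
    global.gc() // force a full collection so leftovers don't skew timing
  }
}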
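The grep-and-config commit referenced above lets users select which benches run. The exact option names in slate-dev-benchmark are not shown in this diff, so the following is only a sketch of the usual shape of such a filter; the pattern source and suite names are illustrative:

// Hypothetical grep-style bench selection; the real slate-dev-benchmark
// configuration keys may differ.
const pattern = process.argv[2] // e.g. "slate-react"

function shouldRun(suiteName) {
  // With no pattern every bench runs; otherwise match by regex.
  return !pattern || new RegExp(pattern).test(suiteName)
}

const suites = ['slate/changes', 'slate-react/rendering', 'slate-html-serializer/deserialize']
suites.filter(shouldRun).forEach(name => console.log('running', name))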
Jinxuan Zhu
2018-07-19 16:01:55 -04:00
committed by Ian Storm Taylor
parent 09c93a6cd4
commit 8f9bfdac2b
79 changed files with 1884 additions and 482 deletions
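
The compare script in the first diff below reports two clocks per bench: user, the process's user-space CPU time, and hr, wall-clock time. A minimal sketch of how a runner can capture both in plain Node; it assumes nothing about slate-dev-benchmark's internals, and the function and field names are illustrative:

// Illustrative only: captures user CPU time and wall-clock (hr) time
// around a bench body, the two channels compare.js reads per benchmark.
function measure(fn, iterations) {
  const cpuStart = process.cpuUsage() // { user, system } in microseconds
  const hrStart = process.hrtime() // [seconds, nanoseconds]

  for (let i = 0; i < iterations; i++) fn()

  const cpu = process.cpuUsage(cpuStart) // delta since cpuStart
  const [sec, ns] = process.hrtime(hrStart) // delta since hrStart

  return {
    iterations,
    user: cpu.user / 1000, // milliseconds of user-space CPU time
    hr: sec * 1000 + ns / 1e6, // milliseconds of elapsed wall-clock time
  }
}

With iterations and either total, iterations / elapsed * 1000 yields ops/sec on that clock, which is exactly how compare.js derives its b and c values below.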


@@ -1,14 +1,24 @@
 /* eslint-disable no-console */

 import chalk from 'chalk'
+import figures from 'figures'
+import emojis from 'emojis'
 import baseline from '../../tmp/benchmark-baseline'
 import comparison from '../../tmp/benchmark-comparison'
+import { existsSync } from 'fs'

 /**
  * Constants.
  */

-const THRESHOLD = 0.333
+let THRESHOLD = 0.333
+
+const configPath = '../../tmp/benchmark-config.js'
+
+if (existsSync(configPath)) {
+  const alternative = require(configPath).THRESHOLD
+
+  if (typeof alternative === 'number' && alternative > 0) {
+    THRESHOLD = alternative
+  }
+}

 /**
  * Print.
@@ -21,27 +31,72 @@ baseline.forEach((suite, i) => {
   console.log(`  ${suite.name}`)

   suite.benchmarks.forEach((base, j) => {
-    const comp = comparison[i].benchmarks[j]
-    if (!comp) return
+    const compared = { user: {}, hr: {} }

-    const b = base.iterations / base.elapsed * 1000
-    const c = comp.iterations / comp.elapsed * 1000
-    const threshold = b * THRESHOLD
-    const slower = b - c > threshold
-    const faster = b - c < 0 - threshold
-    const percent = Math.round(Math.abs(b - c) / b * 100)
+    for (const key of Object.keys(compared)) {
+      const comp = comparison[i].benchmarks[j]
+      if (!comp) return
+      const b = base.iterations / base[key] * 1000
+      const c = comp.iterations / comp[key] * 1000
+      const balancePercent =
+        b > c ? Math.round(Math.abs(b - c) / c * 100) : (c - b) / b * 100

-    let output = `${b.toFixed(2)} ${c.toFixed(2)} ops/sec`
-    if (slower) output = chalk.red(`${output} (${percent}% slower)`)
-    else if (faster) output = chalk.green(`${output} (${percent}% faster)`)
-    else output = chalk.gray(output)
+      const output = `${b.toFixed(2)} -> ${c.toFixed(2)} ops/sec`
+      compared[key].baseOutput = output
+      compared[key].percentOutput = `${balancePercent.toFixed(2)}% ${
+        c > b ? 'faster' : 'slower'
+      }`
+      compared[key].percentValue = balancePercent
+      compared[key].b = b
+      compared[key].c = c
+      compared[key].isFaster = c > b
+
+      if (balancePercent > 1000) {
+        compared[key].percentOutput += emojis.unicode(' :scream: ')
+      } else if (balancePercent > 100) {
+        if (c > b) {
+          compared[key].percentOutput += emojis.unicode(' :raised_hands: ')
+        } else {
+          compared[key].percentOutput += emojis.unicode(' :worried: ')
+        }
+      }
+    }

-    if (percent > 1000) output += ' 😱'
-    else if (faster && percent > 100) output += ' 🙌'
-    else if (slower && percent > 100) output += ' 😟'
+    const { user, hr } = compared

-    console.log(`    ${base.title}`)
-    console.log(`      ${output}`)
+    if (
+      user.percentValue < THRESHOLD * 100 &&
+      hr.percentValue < THRESHOLD * 100
+    ) {
+      console.log(
+        chalk.grey(
+          `  ${figures.tick} ${base.name}: ${user.baseOutput} (${
+            user.percentOutput
+          })`
+        )
+      )
+      return
+    }
+
+    if (user.isFaster === hr.isFaster) {
+      if (user.isFaster) {
+        console.log(chalk.green(`  ${figures.star} ${base.name}:`))
+        console.log(
+          `    user: ${user.baseOutput} (${user.percentOutput})`
+        )
+        console.log(`    real: ${hr.baseOutput} (${hr.percentOutput})`)
+        return
+      }
+
+      console.log(chalk.red(`  ${figures.cross} ${base.name}:`))
+      console.log(
+        `    user: ${user.baseOutput} (${user.percentOutput})`
+      )
+      console.log(`    real: ${hr.baseOutput} (${hr.percentOutput})`)
+      return
+    }
+
+    console.log(chalk.red(`  ${figures.questionMarkPrefix} ${base.name}:`))
+    console.log(`    user: ${user.baseOutput} (${user.percentOutput})`)
+    console.log(`    real: ${hr.baseOutput} (${hr.percentOutput})`)
   })
 })
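
Two things in the diff above are worth unpacking. First, the noise threshold is now user-configurable: compare.js requires tmp/benchmark-config.js when it exists and accepts any positive numeric THRESHOLD. A minimal config; the file path comes from the diff, while the 0.2 value is just an example:

// tmp/benchmark-config.js: optional override read by compare.js.
// THRESHOLD is the fraction of change treated as noise; default 0.333.
module.exports = {
  THRESHOLD: 0.2, // treat anything under a 20% swing as noise
}

Second, the percentage math: with baseline b and comparison c in ops/sec, a faster comparison reports (c - b) / b * 100 and a slower one Math.abs(b - c) / c * 100. So a bench that goes from 200 to 300 ops/sec prints "50.00% faster", and one that drops from 300 to 200 prints "50.00% slower". A result is printed grey with a tick only when both the user and hr percentages fall below THRESHOLD * 100.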


@@ -1,31 +0,0 @@
-const { stdout } = process
-
-module.exports = function(runner, utils) {
-  let hasSuite = false
-  let hasBench = false
-
-  runner.on('start', () => {
-    stdout.write('[')
-  })
-
-  runner.on('end', () => {
-    stdout.write(']')
-  })
-
-  runner.on('suite start', suite => {
-    if (hasSuite) stdout.write(',')
-    stdout.write(`{"name":"${suite.title}","benchmarks":[`)
-    hasSuite = true
-  })
-
-  runner.on('suite end', suite => {
-    hasBench = false
-    stdout.write(']}')
-  })
-
-  runner.on('bench end', bench => {
-    if (hasBench) stdout.write(',')
-    stdout.write(JSON.stringify(bench))
-    hasBench = true
-  })
-}
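
The file removed above was the old JSON reporter: it streamed one JSON array over stdout, using the hasSuite/hasBench flags to emit commas only between items, so the output stayed valid JSON without buffering every result in memory. Captured output therefore parsed as ordinary JSON. A sketch of consuming that stream; the sample string is illustrative, and the exact bench fields depended on the runner:

// Shape of the stream the deleted reporter produced:
// [{"name":"<suite title>","benchmarks":[{ ... }, { ... }]}, ...]
const capturedStdout = '[{"name":"slate/changes","benchmarks":[{"iterations":100}]}]'
const suites = JSON.parse(capturedStdout)
suites.forEach(suite => console.log(suite.name, suite.benchmarks.length))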


@@ -11,6 +11,7 @@ import slatePropTypes from '../../packages/slate-prop-types/package.json'
 import slateReact from '../../packages/slate-react/package.json'
 import slateSchemaViolations from '../../packages/slate-schema-violations/package.json'
 import slateSimulator from '../../packages/slate-simulator/package.json'
+// Do not import slateDevBenchmark here. The benchmark must stay a pure Node.js program that runs without babel-node.

 const configurations = [
   ...factory(slate),
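
The comment added above matters for how benches get written: since the benchmark packages must run under plain Node with no babel-node, a bench file sticks to CommonJS requires rather than ES module syntax. A sketch under that constraint; the Suite/Bench names are assumptions about the slate-dev-benchmark API, not confirmed by this diff:

// Pure-Node bench sketch: plain require, no transpilation needed.
// Suite/Bench are assumed export names, shown here for illustration only.
const { Suite, Bench } = require('slate-dev-benchmark')

const suite = new Suite('example')

new Bench(suite, 'string building', () => {
  let s = ''
  for (let i = 0; i < 1000; i++) s += i
  return s
})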