1
0
mirror of https://github.com/ianstormtaylor/slate.git synced 2025-08-29 18:09:49 +02:00

Refactor benchmarks (#675)

* first stab

* refactor to nanobench

* refactor to matcha

* use hand-rolled comparison logic, ugh

* update threshold

* remove unused dependencies

* remove benchmarks from travis ci
This commit is contained in:
Ian Storm Taylor
2017-03-21 17:38:39 -07:00
committed by GitHub
parent 14193c30f0
commit 6ab686ae77
53 changed files with 357 additions and 654 deletions

3
.gitignore vendored
View File

@@ -3,10 +3,9 @@ dist
examples/build.dev.js
examples/build.prod.js
lib
perf/reference.json
test/support/build.js
# Temporary files.
bench/output
tmp
# Gitbook files.

View File

@@ -3,13 +3,10 @@ env:
matrix:
- TEST_TYPE=test
- TEST_TYPE=lint
- TEST_TYPE=benchmarks
script:
- |
if [ "$TEST_TYPE" = test ]; then
npm test
elif [ "$TEST_TYPE" = benchmarks ]; then
npm run perf
npm run test
elif [ "$TEST_TYPE" = lint ]; then
npm run lint
fi

39
bench/compare.js Normal file
View File

@@ -0,0 +1,39 @@
/* eslint-disable no-console */

import chalk from 'chalk'
import baseline from '../tmp/bench-baseline'
import comparison from '../tmp/bench-comparison'

/**
 * Constants.
 */

// Fractional change in iterations/sec beyond which a result is flagged
// as faster or slower (20%).
const THRESHOLD = 0.2

/**
 * Print.
 *
 * Walk the baseline suites and compare each benchmark with the entry at
 * the same suite/bench index in the comparison run. Assumes both JSON
 * files were produced by the same reporter over the same fixture set,
 * so the indices line up — TODO confirm this invariant holds.
 */

baseline.forEach((suite, i) => {
  console.log()
  console.log(` ${suite.name}`)

  suite.benchmarks.forEach((base, j) => {
    const comp = comparison[i].benchmarks[j]
    // Normalize both runs to iterations per second.
    const b = (base.iterations / base.elapsed)
    const c = (comp.iterations / comp.elapsed)
    const threshold = b * THRESHOLD
    const slower = (b - c) > threshold
    const faster = (b - c) < (0 - threshold)
    // Magnitude of the change, as a percentage of the baseline rate.
    const percent = Math.round(Math.abs(b - c) / b * 100)
    let output = `${b.toFixed(2)} --> ${c.toFixed(2)} iterations/sec`
    if (slower) output = chalk.red(`${output} (${percent}% slower)`)
    if (faster) output = chalk.green(`${output} (${percent}% faster)`)
    console.log(` ${base.title}`)
    console.log(` ${output}`)
  })

  console.log()
})

View File

@@ -0,0 +1,19 @@
// Benchmark fixture: `before` runs once, untimed, to place the cursor;
// the default export is the operation that gets timed.
export function before(state) {
  return state
    .transform()
    .select({
      anchorKey: '_cursor_',
      anchorOffset: 10,
      focusKey: '_cursor_',
      focusOffset: 10
    })
    .apply()
}

// Timed: delete one character backward from the cursor position.
export default function (state) {
  state
    .transform()
    .deleteBackward()
    .apply()
}

View File

@@ -0,0 +1,19 @@
// Benchmark fixture. The untimed `before` hook positions the selection
// at a known text node ('_cursor_') so the timed run is deterministic.
export function before(state) {
  return state
    .transform()
    .select({
      anchorKey: '_cursor_',
      anchorOffset: 10,
      focusKey: '_cursor_',
      focusOffset: 10
    })
    .apply()
}

// Timed body: a single backward deletion at the cursor.
export default function (state) {
  state
    .transform()
    .deleteBackward()
    .apply()
}

View File

@@ -0,0 +1,19 @@
// Benchmark fixture: cursor placement happens in `before` (outside the
// timed region); the default export performs the measured transform.
export function before(state) {
  return state
    .transform()
    .select({
      anchorKey: '_cursor_',
      anchorOffset: 10,
      focusKey: '_cursor_',
      focusOffset: 10
    })
    .apply()
}

// Timed: deleteBackward at the pre-positioned cursor.
export default function (state) {
  state
    .transform()
    .deleteBackward()
    .apply()
}

View File

@@ -0,0 +1,10 @@
import { Editor } from '../../..'
import React from 'react'
import ReactDOM from 'react-dom'

// Benchmark fixture: timed first render of an <Editor> for the given
// state into a fresh, detached div (jsdom provides `document`).
export default function (state) {
  const div = document.createElement('div')
  const props = { state }
  ReactDOM.render(<Editor {...props} />, div)
}

View File

@@ -0,0 +1,4 @@
// Benchmark fixture: timed retrieval of the document's text nodes.
export default function (state) {
  state.document.getTexts()
}

View File

@@ -0,0 +1,19 @@
// Benchmark fixture: `before` runs once, untimed, to place the cursor;
// the default export is the operation that gets timed.
export function before(state) {
  return state
    .transform()
    .select({
      anchorKey: '_cursor_',
      anchorOffset: 10,
      focusKey: '_cursor_',
      focusOffset: 10
    })
    .apply()
}

// Timed: insert a single character at the cursor position.
export default function (state) {
  state
    .transform()
    .insertText('a')
    .apply()
}

View File

@@ -0,0 +1,9 @@
import SCHEMA from '../../../lib/constants/schema'
export default function (state) {
state
.transform({ normalized: false })
.normalize(SCHEMA)
.apply()
}

View File

@@ -0,0 +1,9 @@
import SCHEMA from '../../../lib/constants/schema'

// Benchmark fixture. `{ normalized: false }` forces the transform to
// treat the input as dirty so the normalize pass does real work.
export default function (state) {
  state
    .transform({ normalized: false })
    .normalize(SCHEMA)
    .apply()
}

View File

@@ -0,0 +1,23 @@
import SCHEMA from '../../../lib/constants/schema'

// Benchmark fixture: normalize once, make a trivial edit, then
// normalize again — the second pass is the interesting measurement.
export default function (state) {
  const selection = state.selection.merge({
    anchorKey: '_cursor_',
    anchorOffset: 0,
    focusKey: '_cursor_',
    focusOffset: 0
  })
  state
    .transform({ normalized: false })
    .normalize(SCHEMA)
    .apply()
    .transform()
    // Make a fast, dummy change
    .select(selection)
    .insertText('inserted text')
    // We want to compare the speed of that second normalize (optimized through memoization, or other means)
    .normalize(SCHEMA)
    .apply()
}

View File

@@ -0,0 +1,9 @@
import SCHEMA from '../../../lib/constants/schema'
export default function (state) {
state
.transform({ normalized: false })
.normalize(SCHEMA)
.apply()
}

View File

@@ -0,0 +1,22 @@
import { Editor } from '../../..'
import React from 'react'
import ReactDOM from 'react-dom'

// Untimed setup: place the cursor so splitBlock has a fixed target.
export function before(state) {
  return state.transform()
    .select({
      anchorKey: '_cursor_',
      anchorOffset: 10,
      focusKey: '_cursor_',
      focusOffset: 10
    })
    .apply()
}

// Timed: initial render, split the block at the cursor, then re-render
// to measure the incremental update.
export default function (state) {
  const div = document.createElement('div')
  ReactDOM.render(<Editor state={state} />, div)
  state = state.transform().splitBlock().apply()
  ReactDOM.render(<Editor state={state} />, div)
}

View File

@@ -0,0 +1,19 @@
// Benchmark fixture: `before` runs once, untimed, to place the cursor;
// the default export is the operation that gets timed.
export function before(state) {
  return state
    .transform()
    .select({
      anchorKey: '_cursor_',
      anchorOffset: 10,
      focusKey: '_cursor_',
      focusOffset: 10
    })
    .apply()
}

// Timed: split the block at the cursor position.
export default function (state) {
  state
    .transform()
    .splitBlock()
    .apply()
}

View File

@@ -0,0 +1,19 @@
// Benchmark fixture. The untimed `before` hook selects a fixed point in
// the '_cursor_' text node so each timed run starts identically.
export function before(state) {
  return state
    .transform()
    .select({
      anchorKey: '_cursor_',
      anchorOffset: 10,
      focusKey: '_cursor_',
      focusOffset: 10
    })
    .apply()
}

// Timed body: one splitBlock transform.
export default function (state) {
  state
    .transform()
    .splitBlock()
    .apply()
}

View File

@@ -0,0 +1,19 @@
// Benchmark fixture: cursor placement happens untimed in `before`; the
// default export performs the measured splitBlock.
export function before(state) {
  return state
    .transform()
    .select({
      anchorKey: '_cursor_',
      anchorOffset: 10,
      focusKey: '_cursor_',
      focusOffset: 10
    })
    .apply()
}

// Timed: split the current block at the cursor.
export default function (state) {
  state
    .transform()
    .splitBlock()
    .apply()
}

48
bench/index.js Normal file
View File

@@ -0,0 +1,48 @@
/**
 * Polyfills.
 */

require('jsdom-global/register')

/**
 * Dependencies.
 */

const fs = require('fs')
const path = require('path')
const readMetadata = require('read-metadata')
const { __clear } = require('../lib/utils/memoize')
const { Raw } = require('..')

/**
 * Benchmarks.
 *
 * Runs under matcha, which supplies the `suite`, `set`, `bench` and
 * `after` globals. Each directory under ./fixtures provides an
 * input.yaml (initial state) plus an index.js whose default export is
 * the timed function and whose optional `before` export prepares the
 * state outside the timed region.
 */

suite('benchmarks', () => {
  set('iterations', 200) // eslint-disable-line no-undef
  set('mintime', 2000) // eslint-disable-line no-undef

  const fixtures = path.resolve(__dirname, './fixtures')
  const benchmarks = fs.readdirSync(fixtures)

  benchmarks.forEach((benchmark) => {
    // Skip dotfiles (e.g. .DS_Store) in the fixtures directory.
    if (benchmark[0] === '.') return
    // NOTE(review): this fixture is skipped; the reason is not recorded
    // here — confirm before re-enabling.
    if (benchmark === 'normalize-document-twice') return

    const dir = path.resolve(fixtures, benchmark)
    const input = readMetadata.sync(path.resolve(dir, 'input.yaml'))
    const initial = Raw.deserialize(input, { terse: true })
    // Shadows CommonJS `module` inside this callback scope — legal, but
    // easy to misread.
    const module = require(dir)
    const run = module.default
    const state = module.before ? module.before(initial) : initial

    bench(benchmark, () => { // eslint-disable-line no-undef
      run(state)
    })

    // Clear memoized values so one benchmark cannot skew the next.
    after(() => {
      __clear()
    })
  })
})

32
bench/reporter.js Normal file
View File

@@ -0,0 +1,32 @@
const { stdout } = process
module.exports = function (runner, utils) {
let hasSuite = false
let hasBench = false
runner.on('start', () => {
stdout.write('[')
})
runner.on('end', () => {
stdout.write(']')
})
runner.on('suite start', (suite) => {
if (hasSuite) stdout.write(',')
stdout.write(`{"name":"${suite.title}","benchmarks":[`)
hasSuite = true
})
runner.on('suite end', (suite) => {
hasBench = false
stdout.write(']}')
})
runner.on('bench end', (bench) => {
if (hasBench) stdout.write(',')
stdout.write(JSON.stringify(bench))
hasBench = true
})
}

View File

@@ -33,10 +33,10 @@
"babel-preset-react": "^6.5.0",
"babel-preset-stage-0": "^6.5.0",
"babelify": "^7.3.0",
"benchmark": "^2.1.1",
"browserify": "^13.0.1",
"browserify-global-shim": "^1.0.3",
"browserify-shim": "^3.8.12",
"chalk": "^1.1.3",
"disc": "^1.3.2",
"envify": "^3.4.1",
"eslint": "^3.8.1",
@@ -52,6 +52,7 @@
"jest": "^17.0.3",
"jsdom": "9.6.0",
"jsdom-global": "2.1.0",
"matcha": "^0.7.0",
"microtime": "2.1.1",
"mocha": "^2.5.3",
"np": "^2.9.0",
@@ -91,10 +92,9 @@
"gh-pages": "npm run build && npm run examples && gh-pages --dist ./examples",
"lint": "eslint --ignore-pattern 'build.dev.js' --ignore-pattern 'build.prod.js' '{examples,src}/**/*.js'",
"open": "open http://localhost:8080/dev.html",
"perf": "npm-run-all build:npm benchmarks",
"perf:save": "npm-run-all build:npm benchmarks:save",
"benchmarks": "babel-node ./perf/index.js --compare ./perf/reference.json",
"benchmarks:save": "babel-node ./perf/index.js --output ./perf/reference.json",
"bench": "npm-run-all build:npm benchs",
"benchs": "babel-node ./node_modules/.bin/_matcha --reporter ./bench/reporter bench/index.js > ./tmp/bench-comparison.json && babel-node ./bench/compare",
"benchs:save": "babel-node ./node_modules/.bin/_matcha --reporter ./bench/reporter bench/index.js > ./tmp/bench-baseline.json",
"prepublish": "npm run build",
"postpublish": "npm run gh-pages",
"release": "np",

View File

@@ -1,63 +0,0 @@
# Benchmarks
This folder contains a set of benchmarks used to compare performance between Slate versions. We use [BenchmarkJS](https://benchmarkjs.com/) to measure performance.
## Running the benchmark
The following command will make sure to compile Slate before running the benchmarks.
```
npm run perf
```
You can skip Slate's compilation by running directly
```
npm run benchmarks
```
### Comparing results
You can save the results of the benchmarks with:
```shell
npm run perf:save
```
The results are saved as JSON in `./perf/reference.json`. You can then check out a different implementation, and run a comparison benchmark with the usual command:
```
npm run perf
```
`perf` and `benchmarks` automatically look for an existing `reference.json` to use for comparison.
### Understanding the results
Each benchmark prints its results, showing:
- The number of **operations per second**. This is the relevant value, to be compared with values from different implementations.
- The **number of samples** run. BenchmarkJS has a special heuristic to choose how many samples must be made. The results are more accurate with a high number of samples. A low sample count often goes hand in hand with a high relative margin of error.
- The **relative margin** of error for the measure. The lower the value, the more accurate the results are. When compared with previous results, we display the average relative margin of error.
- (comparison only) A **comparison** of the two implementations, according to BenchmarkJS. It can be Slower, Faster, or Indeterminate.
- (comparison only) The **difference** in operations per second. Expressed as a percentage of the reference.
## Writing a benchmark
To add a benchmark, create a new folder in the `perf/benchmarks/` directory. It must contain two files:
1. `input.yaml` to provide an initial State
2. `index.js` to tell what to run
`index.js` must export a `run(state)` function. This whole function will be benchmarked. It will be run several times, with the parsed state from `input.yaml` as parameter. You can optionally export a `setup(state) -> state` function, to modify the state parsed from `input.yaml`.
Note 1: Everything must be sync.
Note 2: To avoid unwanted memoization, a different instance of `state` will be passed for every `run` call.
## Detailed options for the benchmark script
You can also launch the benchmark script directly. See usage:
``` shell
babel-node ./perf/index.js -h
```

View File

@@ -1,18 +0,0 @@
// Benchmark fixture (BenchmarkJS-era API): `setup` runs untimed to
// position the cursor; `run` is the timed operation.
module.exports = {
  setup(state) {
    // Move cursor
    return state.transform()
      .moveTo({
        anchorKey: '_cursor_',
        anchorOffset: 10,
        focusKey: '_cursor_',
        focusOffset: 10
      })
      .apply()
  },
  // Timed: delete one character backward at the cursor.
  run(state) {
    return state.transform().deleteBackward().apply()
  }
}

View File

@@ -1,18 +0,0 @@
// Benchmark fixture for the old BenchmarkJS runner. Untimed `setup`
// moves the selection to a fixed point in the '_cursor_' text node.
module.exports = {
  setup(state) {
    // Move cursor
    return state.transform()
      .moveTo({
        anchorKey: '_cursor_',
        anchorOffset: 10,
        focusKey: '_cursor_',
        focusOffset: 10
      })
      .apply()
  },
  // Timed body: a single backward deletion.
  run(state) {
    return state.transform().deleteBackward().apply()
  }
}

View File

@@ -1,18 +0,0 @@
// Benchmark fixture (old runner): cursor placement is in `setup`, so
// only the deleteBackward transform is measured.
module.exports = {
  setup(state) {
    // Move cursor
    return state.transform()
      .moveTo({
        anchorKey: '_cursor_',
        anchorOffset: 10,
        focusKey: '_cursor_',
        focusOffset: 10
      })
      .apply()
  },
  // Timed: deleteBackward at the pre-positioned cursor.
  run(state) {
    return state.transform().deleteBackward().apply()
  }
}

View File

@@ -1,12 +0,0 @@
const { Editor } = require('../../../')
const React = require('react')
const ReactDOM = require('react-dom')

// Benchmark fixture (old runner): timed first render of an <Editor>
// into a fresh, detached div (jsdom provides `document`).
module.exports = {
  run(state) {
    const div = document.createElement('div')
    const props = { state }
    ReactDOM.render(<Editor {...props} />, div)
  }
}

View File

@@ -1,6 +0,0 @@
// Benchmark fixture (old runner): timed retrieval of the document's
// text nodes.
module.exports = {
  run(state) {
    state.document.getTexts()
  }
}

View File

@@ -1,18 +0,0 @@
// Benchmark fixture (old runner): `setup` positions the cursor untimed;
// `run` performs the measured insertion.
module.exports = {
  setup(state) {
    // Move cursor
    return state.transform()
      .moveTo({
        anchorKey: '_cursor_',
        anchorOffset: 10,
        focusKey: '_cursor_',
        focusOffset: 10
      })
      .apply()
  },
  // Timed: insert a single character at the cursor.
  run(state) {
    return state.transform().insertText('a').apply()
  }
}

View File

@@ -1,22 +0,0 @@
const { default: memoize } = require('../../../lib/utils/memoize')

// Benchmark fixture (old runner) for the memoize utility itself.
module.exports = {
  // Build an object with a recursive fibonacci method, then wrap that
  // method with the project's memoizer.
  setup(state) {
    let obj = {
      fibonacci(n = 20) {
        if (n === 0 || n === 1) {
          return n
        } else {
          return this.fibonacci(n - 1) + this.fibonacci(n - 2)
        }
      }
    }
    memoize(obj, ['fibonacci'])
    return obj
  },
  // Timed: call the memoized method (cache hits after the first call).
  run(obj) {
    obj.fibonacci()
  }
}

View File

@@ -1,6 +0,0 @@
# Minimal single-paragraph document used as benchmark input.
nodes:
  - kind: block
    type: paragraph
    nodes:
      - kind: text
        text: 'Useless input state'

View File

@@ -1,6 +0,0 @@
// Benchmark fixture (old runner): timed full-document normalization,
// with the state explicitly flagged as not yet normalized.
module.exports = {
  run(state) {
    return state.transform({ normalized: false }).normalize().apply()
  }
}

View File

@@ -1,6 +0,0 @@
// Benchmark fixture (old runner). `{ normalized: false }` forces the
// transform to treat the input as dirty so normalize does real work.
module.exports = {
  run(state) {
    return state.transform({ normalized: false }).normalize().apply()
  }
}

View File

@@ -1,19 +0,0 @@
// Benchmark fixture (old runner): normalize once, make a trivial edit,
// then normalize again — the second pass is the measurement of interest.
module.exports = {
  run(state) {
    const selection = state.selection.merge({
      anchorKey: '_cursor_',
      anchorOffset: 0,
      focusKey: '_cursor_',
      focusOffset: 0
    })
    return state
      .transform({ normalized: false }).normalize().apply()
      .transform()
      // Make a fast, dummy change
      .moveTo(selection).insertText('inserted text')
      // We want to compare the speed of that second normalize (optimized through memoization, or other means)
      .normalize().apply()
  }
}

View File

@@ -1,6 +0,0 @@
// Benchmark fixture (old runner): one timed normalization pass over a
// state marked as unnormalized.
module.exports = {
  run(state) {
    return state.transform({ normalized: false }).normalize().apply()
  }
}

View File

@@ -1,28 +0,0 @@
const { Editor } = require('../../../')
const React = require('react')
const ReactDOM = require('react-dom')

// Benchmarks a first rendering, followed by a new rendering after a split-block
module.exports = {
  // Untimed: place the cursor so splitBlock has a fixed target.
  setup(state) {
    // Move cursor
    return state.transform()
      .moveTo({
        anchorKey: '_cursor_',
        anchorOffset: 10,
        focusKey: '_cursor_',
        focusOffset: 10
      })
      .apply()
  },
  // Timed: render, split the block, and render again to measure the
  // incremental update.
  run(state) {
    const div = document.createElement('div')
    ReactDOM.render(<Editor state={state} />, div)
    state = state.transform().splitBlock().apply()
    ReactDOM.render(<Editor state={state} />, div)
  }
}

View File

@@ -1,18 +0,0 @@
// Benchmark fixture (old runner): `setup` positions the cursor untimed;
// `run` performs the measured split.
module.exports = {
  setup(state) {
    // Move cursor
    return state.transform()
      .moveTo({
        anchorKey: '_cursor_',
        anchorOffset: 10,
        focusKey: '_cursor_',
        focusOffset: 10
      })
      .apply()
  },
  // Timed: split the current block at the cursor.
  run(state) {
    return state.transform().splitBlock().apply()
  }
}

View File

@@ -1,18 +0,0 @@
// Benchmark fixture (old runner). Untimed `setup` selects a fixed point
// in the '_cursor_' text node so each timed run starts identically.
module.exports = {
  setup(state) {
    // Move cursor
    return state.transform()
      .moveTo({
        anchorKey: '_cursor_',
        anchorOffset: 10,
        focusKey: '_cursor_',
        focusOffset: 10
      })
      .apply()
  },
  // Timed body: one splitBlock transform.
  run(state) {
    return state.transform().splitBlock().apply()
  }
}

View File

@@ -1,18 +0,0 @@
// Benchmark fixture (old runner): cursor placement is in `setup`, so
// only the splitBlock transform is measured.
module.exports = {
  setup(state) {
    // Move cursor
    return state.transform()
      .moveTo({
        anchorKey: '_cursor_',
        anchorOffset: 10,
        focusKey: '_cursor_',
        focusOffset: 10
      })
      .apply()
  },
  // Timed: splitBlock at the pre-positioned cursor.
  run(state) {
    return state.transform().splitBlock().apply()
  }
}

View File

@@ -1,343 +0,0 @@
// Performance benchmark

const USAGE = `
Usage: babel-node ./perf/index.js [--compare referencePath] [--only benchmarkName] [--output outputPath]
--compare referencePath Compare with results stored in the JSON at referencePath
--only benchmarkName Only run the designated benchmark (named after the benchmark directory)
--output outputPath Output the benchmarks results as JSON at outputPath
`

const Benchmark = require('benchmark')
const jsdomGlobal = require('jsdom-global')

// Setup virtual DOM for rendering tests, before loading React (so it
// sees the fake DOM), but after loading BenchmarkJS so that it does
// not think we are running inside a browser.
jsdomGlobal()

const fs = require('fs')
const readMetadata = require('read-metadata')
const { Raw } = require('..')
const memoize = require('../lib/utils/memoize')
const { resolve } = require('path')

// Fallback hooks merged into each benchmark module, so every hook
// (setup/teardown/run) is optional for fixture authors.
const DEFAULT_BENCHMARK = {
  setup(state) { return state },
  teardown() {},
  run(state) {}
}

const BENCHMARK_OPTIONS = {
  // To ensure a better accuracy, force a minimum number of samples
  minSamples: 80 // default 10
}

// Because BenchmarkJS does not support scoped variables well, use
// globals... Each benchmark has its own namespace scope, that can be
// accessed through the `getScope` global function
const scopes = {}
global.currentBenchmark = undefined // The benchmark being run

// Register a per-benchmark scope object under its name.
global.setScope = function (benchmarkName, scope) {
  scopes[benchmarkName] = scope
}

// Fetch the scope of the benchmark currently running.
global.getScope = function () {
  return scopes[global.currentBenchmark]
}
/**
 * Discover every benchmark under ./benchmarks, register it with a
 * BenchmarkJS suite, and run the suite, printing (and optionally
 * saving/comparing) the results.
 */
function runBenchmarks() {
  print('Benchmarks\n')

  // Command line options
  const { outputPath, reference, only, help } = parseCommandLineOptions(process)
  if (help) return printUsage()

  let suite = new Benchmark.Suite()
  let results = {} // Can be saved as JSON

  // For each benchmark
  const suiteDir = resolve(__dirname, './benchmarks')
  const benchmarks = fs.readdirSync(suiteDir)

  for (const benchmarkName of benchmarks) {
    // Skip dotfiles; honor --only filtering.
    if (benchmarkName[0] == '.') continue
    if (only && benchmarkName != only) continue

    const benchmarkDir = resolve(suiteDir, benchmarkName)

    // Read benchmark specification
    const benchmark = Object.assign({}, DEFAULT_BENCHMARK, require(benchmarkDir))

    // Parse input Slate.State
    const input = readMetadata.sync(resolve(benchmarkDir, 'input.yaml'))

    // Setup global scope for this benchmark
    global.setScope(benchmarkName, {
      Raw,
      memoize,
      benchmark,
      input
    })

    // Add it to the benchmark suite
    suite.add(Object.assign({}, BENCHMARK_OPTIONS, {
      name: benchmarkName,

      onStart() {
        print(indent(1), benchmarkName)
        // Use this test's scope
        global.currentBenchmark = benchmarkName
      },

      // Time spent in setup is not taken into account
      setup() {
        const scope = global.getScope()
        // Each benchmark is given the chance to do its own setup
        const state = scope.benchmark.setup( // eslint-disable-line no-unused-vars
          scope.Raw.deserialize(scope.input, { terse: true })
        )
      },

      // Because of the way BenchmarkJS compiles the functions,
      // the variables declared in `setup` are visible to `fn`
      fn() {
        scope.benchmark.run(state) // eslint-disable-line no-undef
        // Clear memoized values between each run
        scope.memoize.__clear() // eslint-disable-line no-undef
      },

      onComplete() {
        global.getScope().benchmark.teardown()
      }
    }))
  }

  suite
    // On benchmark completion
    .on('cycle', (event) => {
      const result = serializeResult(event)
      results[result.name] = result
      compareResult(result, reference)
    })
    // On suite completion
    .on('complete', (event) => {
      if (outputPath) {
        save(results, outputPath)
        print(`\nSaved results as JSON to ${outputPath}`)
      }
    })
    // Run async to properly flush logs
    .run({ 'async': true })
}
/**
 * Parse the benchmark runner's command line flags.
 *
 * @param {Node.Process} process
 * @return {Object} { reference: JSON?, outputPath: String?, only: String? }
 */
function parseCommandLineOptions(process) {
  let outputPath
  let reference
  let only
  let help = false

  const args = process.argv.slice(2)

  for (let i = 0; i < args.length; i++) {
    const arg = args[i]

    if (arg === '-h' || arg === '--help') {
      help = true
    } else if (arg === '--output') {
      // Flag takes a value: consume the next argument too.
      outputPath = args[i + 1]
      i++
    } else if (arg === '--only') {
      only = args[i + 1]
      i++
    } else if (arg === '--compare') {
      const refPath = resolve(process.cwd(), args[i + 1])
      // Silently ignore a missing reference file.
      if (exists(refPath)) {
        const fileContents = fs.readFileSync(refPath, 'utf-8')
        reference = JSON.parse(fileContents)
      }
      i++
    } else {
      printUsage()
      throw new Error(`Invalid argument ${arg}`)
    }
  }

  return {
    outputPath,
    reference,
    only,
    help
  }
}
// Print the CLI usage banner (USAGE is defined at the top of the file).
function printUsage() {
  print(USAGE)
}
/**
 * Whether a filesystem entry exists at `filepath`.
 *
 * @param {String} filepath
 * @return {Boolean} true if stat succeeds, false otherwise
 */
function exists(filepath) {
  try {
    fs.statSync(filepath)
  } catch (e) {
    // stat throws (e.g. ENOENT) when the path is absent.
    return false
  }
  return true
}
/**
 * Write `results` to `path` (resolved against the working directory)
 * as pretty-printed JSON.
 *
 * @param {Object} results
 * @param {String} path
 */
function save(results, path) {
  const target = resolve(process.cwd(), path)
  const serialized = JSON.stringify(results, null, 2)
  fs.writeFileSync(target, serialized)
}
/**
 * Convert a BenchmarkJS `cycle` event into a plain, JSON-friendly
 * result object: { name, error? } or { name, hz, stats }.
 *
 * @param {Object} event - event whose `target` is the finished benchmark
 * @return {Object}
 */
function serializeResult(event) {
  const { target } = event
  const { error, name } = target
  const result = { name }

  if (target.error) {
    // Failed run: record only the error alongside the name.
    Object.assign(result, { error })
  } else {
    // Successful run: keep ops/sec plus the sampling statistics.
    const { hz } = target
    const { mean, rme, sample } = target.stats
    Object.assign(result, {
      hz,
      stats: { rme, mean, sample }
    })
  }

  return result
}
/**
 * Pretty print a benchmark result, along with its reference.
 * Mean difference, and rme computations inspired from
 * https://github.com/facebook/immutable-js/blob/master/resources/bench.js
 *
 * @param {Object} result
 * @param {Object} reference (optional)
 */
function compareResult(result, reference = {}) {
  const { name } = result
  const ref = reference[name]
  // If either run errored, skip the numeric comparisons below.
  const errored = ref && (ref.error || result.error)

  print(indent(2), 'Current: ', formatPerf(result))
  if (ref) {
    print(indent(2), 'Reference: ', formatPerf(ref))
  }

  // Print comparison
  if (ref && !errored) {
    print(indent(2), `comparison: ${compare(result, ref)}`)
  }

  // Print difference as percentage
  if (ref && !errored) {
    // stats.mean is seconds/op, so invert to ops/sec before diffing.
    const newMean = 1 / result.stats.mean
    const prevMean = 1 / ref.stats.mean
    const diffMean = 100 * (newMean - prevMean) / prevMean
    print(indent(2), `diff: ${signed(diffMean.toFixed(2))}%`) // diff: -3.45%
  }

  // Print relative mean error
  if (ref && !errored) {
    // Combined rme of the two runs (root mean square of both).
    const aRme = 100 * Math.sqrt(
      (square(result.stats.rme / 100) + square(ref.stats.rme / 100)) / 2
    )
    print(indent(2), `rme: \xb1${aRme.toFixed(2)}%`) // rme: ±6.22%
  } else if (!result.error) {
    print(indent(2), `rme: \xb1${result.stats.rme.toFixed(2)}%`) // rme: ±6.22%
  }

  print('') // newline
}
/**
 * Pretty format a benchmark's ops/sec along with its sample size.
 *
 * @param {Object} result
 * @return {String}
 */
function formatPerf(result) {
  // Errored runs render as their error.
  if (result.error) return result.error

  const { hz } = result
  const runs = result.stats.sample.length
  // Show two decimals only for slow benchmarks (< 100 ops/sec).
  const decimals = hz < 100 ? 2 : 0
  const opsSec = Benchmark.formatNumber(`${hz.toFixed(decimals)}`)
  return `${opsSec} ops/sec (${runs} runs sampled)`
}
/**
 * @param {Object} newResult
 * @param {Object} oldResult
 * @return {String} Faster, Slower, or Indeterminate
 */
function compare(newResult, oldResult) {
  // BenchmarkJS's compare() is borrowed via .call so it runs against
  // our plain result objects; it returns 1, -1 or 0.
  const verdict = (new Benchmark()).compare.call(newResult, oldResult)
  if (verdict === 1) return 'Faster'
  if (verdict === -1) return 'Slower'
  return 'Indeterminate'
}
// Build an indentation string: one space per nesting level.
function indent(level = 0) {
  let pad = ''
  for (let i = 0; i < level; i++) {
    pad += ' '
  }
  return pad
}
// Square of a number.
function square(x) {
  return x ** 2
}
// Render a numeric value with an explicit '+' for positives; other
// values (negatives, zero) are stringified unchanged.
function signed(x) {
  if (x > 0) {
    return `+${x}`
  }
  return `${x}`
}
// Console wrapper so the eslint no-console exemption lives in one place.
function print(...strs) {
  console.log.apply(console, strs) // eslint-disable-line no-console
}
// --------------------------------------------------
// Main
// --------------------------------------------------

// Kick off the suite immediately when this script is executed.
runBenchmarks()

12
test/Readme.md Normal file
View File

@@ -0,0 +1,12 @@
This directory contains all of the tests for Slate. It's separated further into a series of directories:
- [**Behavior**](./behavior) — testing user interaction behaviors like pressing keys, or blurring the editor.
- [**Benchmarks**](./benchmarks) — testing performance of various parts of Slate.
- [**Helpers**](./helpers) — a series of test helper utilities to make writing tests easier.
- [**Rendering**](./rendering) — testing the rendering logic of Slate's components.
- [**Schema**](./schema) — testing the core schema validation logic.
- [**Serializers**](./serializers) — testing the serializers that Slate ships with by default.
- [**Transforms**](./transforms) — testing the transforms that change the content of a Slate editor.
Feel free to poke around in each of them to see how they work!