feat(benchmarks): add JMH benchmarks for performance testing and load scripts
Build & Test (NowChessSystems) TeamCity build failed

This commit is contained in:
2026-05-05 21:14:42 +02:00
committed by Janis
parent 0eb752d493
commit 763a2b8c39
13 changed files with 572 additions and 1 deletions
+3 -1
View File
@@ -3,6 +3,7 @@ plugins {
id("org.scoverage") version "8.1" apply false
id("com.diffplug.spotless") version "8.4.0" apply false
id("io.github.cosmicsilence.scalafix") version "0.2.6" apply false
id("me.champeau.jmh") version "0.7.2" apply false
}
group = "de.nowchess"
@@ -99,7 +100,8 @@ val versions = mapOf(
"SCALA_PARSER_COMBINATORS" to "2.4.0",
"FASTPARSE" to "3.0.2",
"JACKSON" to "2.17.2",
"JACKSON_SCALA" to "2.17.2"
"JACKSON_SCALA" to "2.17.2",
"JMH" to "1.37"
)
extra["VERSIONS"] = versions
+100
View File
@@ -0,0 +1,100 @@
# NowChess Load Testing with k6
Performance testing suite for NowChess services using k6.
## Installation
```bash
# Install k6 (macOS)
brew install k6
# Or download from https://k6.io/docs/getting-started/installation/
```
## Test Scenarios
### 1. Ramp-Up Test
Gradually increases load from 10 to 100 concurrent users.
```bash
k6 run ramp-up.js
```
Target: Identify system behavior under gradual load increase.
### 2. Stress Test
Incremental load increase up to 500 concurrent users to find breaking point.
```bash
k6 run stress-test.js
```
Target: Determine system capacity and failure point.
### 3. Spike Test
Sudden traffic surge (baseline 50 → 500 users instantly).
```bash
k6 run spike-test.js
```
Target: Test recovery and resilience to sudden spikes.
### 4. Constant Load Test
Maintains 50 VUs for 10 minutes.
```bash
k6 run constant-load.js
```
Target: Check stability under sustained load.
## Environment Variables
```bash
# Override service endpoints
export BASE_URL=http://localhost:8080
export ACCOUNT_HOST=http://localhost:8083
export STORE_HOST=http://localhost:8085
k6 run ramp-up.js
```
## Prerequisites
Ensure services are running:
- Core: `localhost:8080`
- Account: `localhost:8083`
- Store: `localhost:8085`
- Redis: `localhost:6379` (with increased pool size)
## Metrics Interpretation
- `http_req_duration`: Response time (p95, p99 percentiles matter most)
- `http_req_failed`: Failed requests (connection errors and non-2xx/3xx responses)
- `vus`: Virtual Users (concurrent connections)
- `iterations`: Completed test cycles per VU
## Results
k6 prints a summary to the terminal by default; export raw metrics to CSV with:
```bash
k6 run --out=csv=results.csv ramp-up.js
```
## Troubleshooting
If you see:
- **"max pool wait timeout"** → Redis pool still too small or maxWaitTime too short
- **"connection refused"** → Service not running
- **"high p99 latency"** → System approaching capacity
Increase Redis pool settings in `modules/*/src/main/resources/application.yml`:
```yaml
quarkus:
redis:
pool:
max-size: 128 # Increase if still hitting limits
max-waiting: 256
```
+25
View File
@@ -0,0 +1,25 @@
// Service hosts for k6 load tests. Each can be overridden via environment
// variables (see README); defaults target the local dev ports.
export const BASE_URL = __ENV.BASE_URL || 'http://localhost:8080';
export const ACCOUNT_HOST = __ENV.ACCOUNT_HOST || 'http://localhost:8083';
export const STORE_HOST = __ENV.STORE_HOST || 'http://localhost:8085';
export const CORE_HOST = BASE_URL;

export const ENDPOINTS = {
  // Account endpoints
  accountCreateUser: `${ACCOUNT_HOST}/api/account`,
  accountLogin: `${ACCOUNT_HOST}/api/account/login`,
  accountProfile: `${ACCOUNT_HOST}/api/account/me`,
  accountPublicProfile: (username) => `${ACCOUNT_HOST}/api/account/${username}`,
  // Store endpoints
  storeGame: `${STORE_HOST}/api/games`,
  storeGameById: (gameId) => `${STORE_HOST}/api/games/${gameId}`,
  // Core endpoints (game operations).
  // Derive the ws:// URL from CORE_HOST so a BASE_URL override is honored
  // (previously this was hard-coded to ws://localhost:8080 and silently
  // ignored the environment variable).
  gameWebSocket: (gameId) =>
    `${CORE_HOST.replace(/^http/, 'ws')}/api/games/${gameId}/ws`,
};

// Pre-provisioned accounts used by write-path scenarios.
export const TEST_USERS = [
  { username: 'load-test-user-1', email: 'load1@example.com' },
  { username: 'load-test-user-2', email: 'load2@example.com' },
  { username: 'load-test-user-3', email: 'load3@example.com' },
];
+40
View File
@@ -0,0 +1,40 @@
import http from 'k6/http';
import { check, sleep } from 'k6';
import { ENDPOINTS } from './config.js';
// Constant-load scenario: hold 50 VUs steady for 10 minutes to observe
// stability under sustained, unchanging traffic.
export const options = {
  vus: 50,
  duration: '10m',
  thresholds: {
    // Latency SLO: 95th percentile under 500 ms, 99th under 1 s.
    http_req_duration: ['p(95)<500', 'p(99)<1000'],
    // Abort-worthy if more than 10% of requests fail.
    http_req_failed: ['rate<0.1'],
  },
};
/**
 * Steady-state iteration: each VU fetches its own profile, a public
 * profile, and the store game list, pausing between calls to mimic a
 * browsing user.
 */
export default function () {
  const ownProfile = http.get(ENDPOINTS.accountProfile);
  check(ownProfile, {
    'profile status is 200 or 401': (r) => r.status === 200 || r.status === 401,
  });
  sleep(0.5);

  const publicProfile = http.get(ENDPOINTS.accountPublicProfile('testuser'));
  check(publicProfile, {
    'public profile status ok': (r) => r.status > 0,
  });
  sleep(0.5);

  const gameList = http.get(ENDPOINTS.storeGame);
  check(gameList, {
    'store games status ok': (r) => r.status > 0,
  });
  sleep(1);
}
+35
View File
@@ -0,0 +1,35 @@
import http from 'k6/http';
import { check, sleep } from 'k6';
import { ENDPOINTS, ACCOUNT_HOST } from './config.js';
// Ramp-up scenario: grow from 10 to 100 VUs and back down over ~13 minutes
// to observe how the system degrades (or doesn't) as load rises gradually.
export const options = {
  stages: [
    { duration: '1m', target: 10 },
    { duration: '3m', target: 50 },
    { duration: '5m', target: 100 },
    { duration: '3m', target: 50 },
    { duration: '1m', target: 0 },
  ],
  thresholds: {
    // Latency SLO: 95th percentile under 500 ms, 99th under 1 s.
    http_req_duration: ['p(95)<500', 'p(99)<1000'],
    // Fail the run if more than 10% of requests error.
    http_req_failed: ['rate<0.1'],
  },
};
/**
 * One iteration: probe a public account profile and the store game list,
 * sleeping 1 s after each call to model a real user's pacing.
 */
export default function () {
  // Test account endpoints
  const accountProbe = http.get(ENDPOINTS.accountPublicProfile('testuser'));
  check(accountProbe, {
    'account endpoint status is 200': (r) => r.status === 200,
  });
  sleep(1);

  // Test store endpoints
  const storeProbe = http.get(ENDPOINTS.storeGame);
  check(storeProbe, {
    'store endpoint status is 200': (r) => r.status === 200 || r.status === 401,
  });
  sleep(1);
}
+68
View File
@@ -0,0 +1,68 @@
#!/bin/bash
# NowChess load-test runner: dispatches to one of the k6 scenario scripts.
# Usage: run-tests.sh {ramp-up|stress|spike|constant|all} [output-file]
set -e

# Always run from the directory containing the k6 scripts.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Default values
TEST_TYPE="${1:-ramp-up}"
BASE_URL="${BASE_URL:-http://localhost:8080}"
OUTPUT_FILE="${2:-results-$(date +%s).csv}"

echo -e "${BLUE}NowChess Load Test Runner${NC}"
echo "Test Type: $TEST_TYPE"
echo "Base URL: $BASE_URL"
echo ""

# Check if k6 is installed
if ! command -v k6 &> /dev/null; then
    echo -e "${RED}Error: k6 not found. Install from https://k6.io/docs/getting-started/installation/${NC}"
    exit 1
fi

# Quote "$OUTPUT_FILE" everywhere so paths containing spaces don't word-split
# (previously unquoted — ShellCheck SC2086).
case "$TEST_TYPE" in
    ramp-up)
        echo -e "${GREEN}Running Ramp-Up Test (10->100 VUs over 13 minutes)${NC}"
        k6 run --out=csv="$OUTPUT_FILE" ramp-up.js
        ;;
    stress)
        echo -e "${GREEN}Running Stress Test (up to 500 VUs)${NC}"
        k6 run --out=csv="$OUTPUT_FILE" stress-test.js
        ;;
    spike)
        echo -e "${GREEN}Running Spike Test (sudden 50->500 spike)${NC}"
        k6 run --out=csv="$OUTPUT_FILE" spike-test.js
        ;;
    constant)
        echo -e "${GREEN}Running Constant Load Test (50 VUs for 10m)${NC}"
        k6 run --out=csv="$OUTPUT_FILE" constant-load.js
        ;;
    all)
        echo -e "${GREEN}Running All Tests${NC}"
        # Re-invoke this script once per scenario; each child run picks its
        # own timestamped results file.
        for test in ramp-up stress spike constant; do
            echo ""
            echo -e "${BLUE}Starting $test test...${NC}"
            "$0" "$test"
            sleep 5
        done
        exit 0
        ;;
    *)
        echo -e "${RED}Usage: $0 {ramp-up|stress|spike|constant|all} [output-file]${NC}"
        echo ""
        echo "Examples:"
        echo " $0 ramp-up"
        echo " $0 stress results.csv"
        echo " $0 all"
        exit 1
        ;;
esac

echo -e "${GREEN}Test complete. Results saved to $OUTPUT_FILE${NC}"
+36
View File
@@ -0,0 +1,36 @@
import http from 'k6/http';
import { check, sleep } from 'k6';
import { ENDPOINTS } from './config.js';
// Spike scenario: baseline 50 VUs, sudden jump to 500, hold, then drop back.
// NOTE: k6 `stages` entries support only `duration` and `target`; the former
// `ramp: 'fast'` key was not a recognized option, so it has been removed —
// the short 30s duration itself produces the steep ramp.
export const options = {
  stages: [
    { duration: '1m', target: 50 },   // baseline
    { duration: '30s', target: 500 }, // sudden surge
    { duration: '2m', target: 500 },  // hold the spike
    { duration: '30s', target: 50 },  // rapid drop
    { duration: '1m', target: 0 },
  ],
  thresholds: {
    // Looser limits than the other scenarios: latency is expected to
    // degrade during the surge.
    http_req_duration: ['p(95)<2000', 'p(99)<5000'],
    // Tolerate up to 30% failures while spiking.
    http_req_failed: ['rate<0.3'],
  },
};
/**
 * Burst iteration: fire three GETs back-to-back (public profile, store
 * games, own profile) with only a 100 ms pause, maximizing request rate
 * per VU during the spike.
 */
export default function () {
  // Rapid account checks
  const targets = [
    ENDPOINTS.accountPublicProfile('testuser'),
    ENDPOINTS.storeGame,
    ENDPOINTS.accountProfile,
  ];
  for (const target of targets) {
    const res = http.get(target);
    check(res, {
      'response received': (r) => r.status > 0,
      'response time under 3s': (r) => r.timings.duration < 3000,
    });
  }
  sleep(0.1);
}
+38
View File
@@ -0,0 +1,38 @@
import http from 'k6/http';
import { check, sleep } from 'k6';
import { ENDPOINTS } from './config.js';
// Stress scenario: step load up from 100 to 500 VUs and back down over
// ~34 minutes to find the system's breaking point.
export const options = {
  stages: [
    { duration: '2m', target: 100 },
    { duration: '5m', target: 200 },
    { duration: '5m', target: 300 },
    { duration: '5m', target: 400 },
    { duration: '5m', target: 500 },
    { duration: '5m', target: 400 },
    { duration: '5m', target: 200 },
    { duration: '2m', target: 0 },
  ],
  thresholds: {
    // Wider latency budget than ramp-up: degradation is expected near capacity.
    http_req_duration: ['p(95)<1000', 'p(99)<2000'],
    // Tolerate up to 20% failures before the run is marked failed.
    http_req_failed: ['rate<0.2'],
  },
};
/**
 * Per-VU iteration: hit one of ten randomized public profiles plus the
 * store game list, then sleep a random fraction of a second so VUs
 * de-synchronize instead of hammering in lockstep.
 */
export default function () {
  const idx = Math.floor(Math.random() * 10);

  // Burst account endpoint
  const accountRes = http.get(ENDPOINTS.accountPublicProfile(`user-${idx}`));
  check(accountRes, { 'account endpoint responds': (r) => r.status > 0 });

  // Burst store endpoint
  const storeRes = http.get(ENDPOINTS.storeGame);
  check(storeRes, { 'store endpoint responds': (r) => r.status > 0 });

  sleep(Math.random() * 0.5);
}
+36
View File
@@ -0,0 +1,36 @@
# Gradle
.gradle/
build/
# Eclipse
.project
.classpath
.settings/
bin/
# IntelliJ
.idea
*.ipr
*.iml
*.iws
# NetBeans
nb-configuration.xml
# Visual Studio Code
.vscode
.factorypath
# OSX
.DS_Store
# Vim
*.swp
*.swo
# patch
*.orig
*.rej
# Local environment
.env
+77
View File
@@ -0,0 +1,77 @@
# JMH Benchmarks
Java Microbenchmark Harness (JMH) benchmarks for performance-critical components.
## Building
```bash
./gradlew modules:benchmarks:jmhJar
```
Produces `modules/benchmarks/build/libs/benchmarks-1.0-SNAPSHOT-jmh.jar`.
## Running Benchmarks
Since these benchmarks are written in Scala, JMH auto-discovery via jar packaging doesn't work. Instead, run benchmarks via classpath:
```bash
./gradlew modules:benchmarks:run --args='de.nowchess.benchmarks.MoveBenchmark'
```
Or compile and run with explicit classpath:
```bash
./gradlew modules:benchmarks:compileScala
java -cp "modules/benchmarks/build/classes/scala/main:modules/benchmarks/build/resources/main:$(./gradlew -q printClasspath)" \
org.openjdk.jmh.Main de.nowchess.benchmarks.MoveBenchmark
```
Adjust JMH options:
- `-f 1` — 1 fork (faster iteration, less reliable)
- `-i 3 -w 2` — 3 measurement iterations, 2 warmup iterations
- `-bm avgt` — average time mode
- `-tu us` — time unit (microseconds)
Example:
```bash
java -cp "..." org.openjdk.jmh.Main de.nowchess.benchmarks.MoveBenchmark -f 1 -i 3 -w 2 -bm avgt
```
## Writing Benchmarks
Create Scala class with `@Benchmark` methods:
```scala
@BenchmarkMode(Array(Mode.AverageTime, Mode.Throughput))
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@State(Scope.Benchmark)
@Fork(2)
@Warmup(iterations = 3)
@Measurement(iterations = 5)
class SomeBenchmark {
@Setup(Level.Trial)
def setup(): Unit = ???
@org.openjdk.jmh.annotations.Benchmark
def benchmarkSomething(bh: Blackhole): Unit = {
val result = expensiveOperation()
bh.consume(result) // Prevent dead-code elimination
}
}
```
## Benchmark Modes
- `AverageTime` — average execution time per operation
- `Throughput` — operations per unit time
- `SampleTime` — latency samples (min, max, percentiles)
- `SingleShotTime` — total time to execute N ops once
- `All` — run all modes
## Limitations
JMH's Gradle plugin doesn't fully support Scala annotation processing, so benchmarks must be run explicitly by class name rather than auto-discovered. This is a known limitation of JMH + Scala; consider Java-based benchmarks for full auto-discovery support.
## References
- [OpenJDK JMH](https://github.com/openjdk/jmh)
- [JMH Samples](https://github.com/openjdk/jmh/tree/master/jmh-samples) (the former hg.openjdk.org mirror is decommissioned)
+61
View File
@@ -0,0 +1,61 @@
// Benchmarks module: JMH microbenchmarks for performance-critical components.
// Build with `./gradlew modules:benchmarks:jmhJar`; Scala benchmarks are not
// auto-discovered by the jmh plugin, so see the module README for how to run
// a benchmark class explicitly.
plugins {
    id("scala")
    id("me.champeau.jmh")
}

group = "de.nowchess"
version = "1.0-SNAPSHOT"

// Shared version catalog published by the root build script via extra["VERSIONS"].
@Suppress("UNCHECKED_CAST")
val versions = rootProject.extra["VERSIONS"] as Map<String, String>

repositories {
    mavenCentral()
}

scala {
    scalaVersion = versions["SCALA3"]!!
}

dependencies {
    // Pin the Scala 3 standard library exactly to the catalog version.
    implementation("org.scala-lang:scala3-library_3") {
        version {
            strictly(versions["SCALA3"]!!)
        }
    }
    // Compiler is compile-time only; it must not leak into the runtime classpath.
    compileOnly("org.scala-lang:scala3-compiler_3") {
        version {
            strictly(versions["SCALA3"]!!)
        }
    }

    // Core modules for benchmarking
    implementation(project(":modules:api"))
    implementation(project(":modules:core"))
    implementation(project(":modules:rule"))
    implementation(project(":modules:bot-platform"))

    // JMH (jmh configuration is for annotation processor + runtime)
    implementation("org.openjdk.jmh:jmh-core:${versions["JMH"]!!}")
    jmh("org.openjdk.jmh:jmh-core:${versions["JMH"]!!}")
    jmh("org.openjdk.jmh:jmh-generator-annprocess:${versions["JMH"]!!}")
}

// Force a single Scala 2.13 library version on all non-jmh configurations to
// avoid eviction conflicts from transitive dependencies.
// NOTE(review): assumes the root VERSIONS map defines SCALA_LIBRARY — not
// visible in this diff; confirm in the root build script.
configurations.matching { !it.name.startsWith("jmh") }.configureEach {
    resolutionStrategy.force("org.scala-lang:scala-library:${versions["SCALA_LIBRARY"]!!}")
}

jmh {
    jmhVersion.set(versions["JMH"]!!)
    // Benchmarks live in main sources; don't scan the test source set.
    includeTests.set(false)
    // Warn (rather than fail) on duplicate classes when assembling the jmh jar.
    duplicateClassesStrategy.set(DuplicatesStrategy.WARN)
}

tasks.withType<JavaCompile> {
    options.encoding = "UTF-8"
    // Keep method parameter names in bytecode (JMH reflection reads them).
    options.compilerArgs.add("-parameters")
}

tasks.withType<org.gradle.api.tasks.scala.ScalaCompile> {
    scalaCompileOptions.additionalParameters = listOf("-encoding", "UTF-8")
}
@@ -0,0 +1,52 @@
package de.nowchess.benchmarks
import de.nowchess.api.board.{File, Rank, Square}
import de.nowchess.api.game.GameContext
import de.nowchess.rules.sets.DefaultRules
import org.openjdk.jmh.annotations.{Benchmark, BenchmarkMode, Fork, Level, Measurement, Mode, OutputTimeUnit, Scope, Setup, State, Warmup}
import org.openjdk.jmh.infra.Blackhole

import java.util.concurrent.TimeUnit
// scalafix:off DisableSyntax.var
/** JMH benchmarks for the core rules engine: legal-move generation, check and
  * checkmate detection on the initial game position.
  *
  * All benchmarks share one `GameContext` created once per trial in
  * [[setup]]. NOTE(review): this assumes the `DefaultRules` queries are
  * read-only with respect to the context — confirm, otherwise iterations
  * would contaminate each other.
  */
@BenchmarkMode(Array(Mode.AverageTime, Mode.Throughput))
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@State(Scope.Benchmark)
@Fork(value = 2, warmups = 1)
@Warmup(iterations = 3, time = 1)
@Measurement(iterations = 5, time = 1)
class MoveBenchmark {

  // JMH requires mutable @State fields; `uninitialized` avoids a dummy default.
  private var gameContext: GameContext = scala.compiletime.uninitialized

  /** Builds the shared initial position once per trial. */
  @Setup(Level.Trial)
  def setup(): Unit =
    gameContext = GameContext.initial

  /** Generates every legal move from the initial position. */
  @Benchmark
  def benchmarkAllLegalMoves(bh: Blackhole): Unit = {
    val moves = DefaultRules.allLegalMoves(gameContext)
    bh.consume(moves) // prevent dead-code elimination
  }

  /** Detects whether the side to move is in check. */
  @Benchmark
  def benchmarkIsCheck(bh: Blackhole): Unit = {
    val inCheck = DefaultRules.isCheck(gameContext)
    bh.consume(inCheck)
  }

  /** Legal moves from a single square (the A2 pawn in the initial position). */
  @Benchmark
  def benchmarkLegalMovesFirstSquare(bh: Blackhole): Unit = {
    val firstSquare = Square(File.A, Rank.R2)
    val moves = DefaultRules.legalMoves(gameContext)(firstSquare)
    bh.consume(moves)
  }

  /** Checkmate detection on the initial position. */
  @Benchmark
  def benchmarkIsCheckmate(bh: Blackhole): Unit = {
    val result = DefaultRules.isCheckmate(gameContext)
    bh.consume(result)
  }
}
+1
View File
@@ -26,4 +26,5 @@ include(
"modules:ws",
"modules:store",
"modules:coordinator",
"modules:benchmarks",
)