#!/usr/bin/env python3
"""Export NNUE weights to .nbai format for runtime loading."""
import json
import struct
import sys
from datetime import datetime
from pathlib import Path

import torch

# int32 magic tag written at the start of every .nbai file.
# NOTE(review): packed little-endian this yields the bytes b"NABI", not
# b"NBAI" as the original comment claimed — verify against the runtime loader.
MAGIC = 0x4942_414E
VERSION = 1


def _read_sidecar(weights_file: str) -> dict:
    """Load the optional ``*_metadata.json`` sidecar next to the weights file.

    Returns an empty dict when no sidecar exists.
    """
    sidecar = weights_file.replace(".pt", "_metadata.json")
    if Path(sidecar).exists():
        with open(sidecar) as f:
            return json.load(f)
    return {}


def _infer_layers(state_dict: dict) -> list[dict]:
    """Derive layer descriptors from state_dict weight shapes.

    Assumes layers named l1, l2, ..., lN. All hidden layers get activation
    'relu'; the last gets 'linear'.
    """
    # Collect the distinct layer prefixes ("l1", "l2", ...) and sort them
    # numerically, not lexically, so "l10" sorts after "l2".
    names = sorted(
        {k.split(".")[0] for k in state_dict if k.endswith(".weight")},
        key=lambda n: int(n[1:]),
    )
    layers = []
    for i, name in enumerate(names):
        # torch Linear weights are stored as (out_features, in_features).
        out_size, in_size = state_dict[f"{name}.weight"].shape
        activation = "linear" if i == len(names) - 1 else "relu"
        layers.append({
            "activation": activation,
            "inputSize": int(in_size),
            "outputSize": int(out_size),
        })
    return layers


def _write_floats(f, tensor) -> None:
    """Write *tensor* to binary file *f* as flat little-endian float32 values."""
    data = tensor.float().flatten().cpu().numpy()
    # NOTE(review): the pack format was lost in transit; "<{n}f" (little-endian
    # float32) is the reconstruction — confirm against the runtime loader.
    f.write(struct.pack(f"<{len(data)}f", *data))


def export_to_nbai(
    weights_file: str,
    output_file: str,
    trained_by: str = "unknown",
    train_loss: float = 0.0,
) -> None:
    """Export a trained NNUE state_dict (.pt) to a .nbai binary file.

    Parameters
    ----------
    weights_file : path of the torch ``state_dict`` checkpoint to export.
    output_file  : destination .nbai path; parent dirs are created as needed.
    trained_by   : attribution string embedded in the metadata trailer.
    train_loss   : final training loss embedded in the metadata trailer.

    NOTE(review): the original body of this function was partially destroyed
    by an encoding mishap; the binary layout below (header, layer table,
    weights, JSON trailer) is a reconstruction and MUST be validated against
    the .nbai runtime loader before use.
    """
    state_dict = torch.load(weights_file, map_location="cpu")
    metadata = _read_sidecar(weights_file)
    layers = _infer_layers(state_dict)

    print(f"Exporting {len(layers)} layers to {output_file}:")
    for l in layers:
        print(f"  {l['inputSize']} -> {l['outputSize']} [{l['activation']}]")

    Path(output_file).parent.mkdir(parents=True, exist_ok=True)
    with open(output_file, "wb") as f:
        # Header: magic, format version, layer count (all little-endian int32).
        f.write(struct.pack("<i", MAGIC))
        f.write(struct.pack("<i", VERSION))
        f.write(struct.pack("<i", len(layers)))

        # Layer table: inputSize, outputSize, activation flag (1=relu, 0=linear).
        for layer in layers:
            f.write(struct.pack("<i", layer["inputSize"]))
            f.write(struct.pack("<i", layer["outputSize"]))
            f.write(struct.pack("<i", 1 if layer["activation"] == "relu" else 0))

        # Payload: weight matrix then bias vector per layer, float32 row-major.
        for i in range(1, len(layers) + 1):
            _write_floats(f, state_dict[f"l{i}.weight"])
            _write_floats(f, state_dict[f"l{i}.bias"])

        # Trailer: length-prefixed UTF-8 JSON metadata (sidecar fields merged in).
        meta = {
            "trainedBy": trained_by,
            "trainLoss": train_loss,
            "exportedAt": datetime.now().isoformat(),
            **metadata,
        }
        blob = json.dumps(meta).encode("utf-8")
        f.write(struct.pack("<i", len(blob)))
        f.write(blob)

    print(f"Wrote {output_file}")


if __name__ == "__main__":
    # Defaults, overridable positionally:
    #   export_nnue.py [weights.pt] [out.nbai] [trained_by] [train_loss]
    # NOTE(review): the original default values were lost; confirm these paths.
    weights_file = "nnue_weights.pt"
    output_file = "nnue.nbai"
    trained_by = "unknown"
    train_loss = 0.0
    if len(sys.argv) > 1:
        weights_file = sys.argv[1]
    if len(sys.argv) > 2:
        output_file = sys.argv[2]
    if len(sys.argv) > 3:
        trained_by = sys.argv[3]
    if len(sys.argv) > 4:
        train_loss = float(sys.argv[4])
    export_to_nbai(weights_file, output_file, trained_by, train_loss)