Meta
MLX Community

Llama 3.2 3B Instruct benchmark on an Apple M1 Max · 64 GB

← Runs

Prompt tokens

122,880

Generation tokens

30,720

Trials passed

30/30

Verified

68.4 tok/s

652.0 tok/s

Peak memory

3.43/64 GB

Runs well

Trials

Decode / Prefill Speeds

Metadata

metadata.json
{
"runId": "run_073bc90e-81ad-4bc4-8912-d0e48490247f",
"bundleId": "mlx-llama-3.2-3b-instruct-4bit-225b73",
"status": "verified",
"promptTokens": 122880,
"completionTokens": 30720,
"contextLength": 5120,
"harness": {
"version": "0.1.8",
"gitSha": "753787f"
},
"runtime": {
"name": "mlx_lm",
"version": "0.31.0",
"buildFlags": null
},
"model": {
"displayName": "Llama 3.2 3B Instruct",
"format": "mlx",
"quant": "4bit",
"architecture": "llama",
"source": "mlx-community/Llama-3.2-3B-Instruct-4bit",
"fileSizeBytes": 1807496278,
"lab": {
"name": "Meta",
"slug": "meta"
},
"quantizedBy": {
"name": "MLX Community",
"slug": "mlx-community"
}
},
"device": {
"cpu": "Apple M1 Max",
"cpuCores": 10,
"gpu": "Apple M1 Max",
"gpuCores": 32,
"gpuCount": 1,
"ramGb": 64,
"osName": "macOS",
"osVersion": "15.6.1"
},
"decodeTpsMean": 68.4,
"prefillTpsMean": 652,
"ttftP50Ms": 6087.94,
"idleTpsMean": 1409,
"peakRssMb": 3517,
"trialsPassed": 30,
"trialsTotal": 30,
"runnabilityScore": 0.7948006452287947,
"bundleSha256": "62fbe86db21c33400920ce77669770babdcbc4977f3f8ec9d58c8654bd60b79a",
"createdAt": "2026-03-23T01:14:13.731Z"
}