Files
omp_config/models.yml
T
2026-04-30 16:48:39 +08:00

94 lines
2.2 KiB
YAML

---
# Model-provider catalog: each provider entry declares its endpoint, API
# dialect, API-key env var name, and the models it serves (with context
# limits and per-million-token pricing).
providers:
  llm-public-anthropic:
    baseUrl: https://example.com/v1
    api: anthropic-messages
    apiKey: LLM_PUBLIC_KEY
    models:
      - id: kimi-k2.6
        name: KIMI K2.6
        reasoning: true
        input:
          - text
          - image
        contextWindow: 262144
        maxTokens: 262144
        cost:
          input: 0.95
          output: 4
          cacheRead: 0.16
          cacheWrite: 0
  llm-public:
    baseUrl: https://example.com/v1
    api: openai-responses
    apiKey: LLM_PUBLIC_KEY
    models:
      - id: gpt-5.5
        name: GPT-5.5
        reasoning: true
        input:
          - text
          - image
        # models.dev also lists PDF input; OMP models.yml currently supports text/image only.
        # models.dev limit.input: 920000
        contextWindow: 920000
        maxTokens: 130000
        cost:
          input: 5
          output: 30
          cacheRead: 0.5
          cacheWrite: 0
      - id: gpt-5.3-codex-spark
        name: GPT-5.3 Codex Spark
        reasoning: true
        input:
          - text
          - image
        contextWindow: 400000
        maxTokens: 128000
        cost:
          input: 0
          output: 0
          cacheRead: 0
          cacheWrite: 0
      - id: gpt-5.4
        name: GPT-5.4
        reasoning: true
        input:
          - text
          - image
        # prompts with >272K input tokens are priced at 2x input and 1.5x output for the full session for standard, batch, and flex.
        contextWindow: 272000
        maxTokens: 128000
        cost:
          input: 2.5
          output: 15
          cacheRead: 0.25
          cacheWrite: 0
      - id: gpt-5.4-mini
        name: GPT-5.4 mini
        reasoning: true
        input:
          - text
          - image
        contextWindow: 400000
        maxTokens: 128000
        cost:
          input: 0.75
          output: 4.5
          cacheRead: 0.075
          cacheWrite: 0
      - id: gpt-5.3-codex
        name: GPT-5.3 Codex
        reasoning: true
        input:
          - text
          - image
        contextWindow: 400000
        maxTokens: 128000
        cost:
          input: 0
          output: 0
          cacheRead: 0
          cacheWrite: 0