# OpenAI Chat Completions API
# API for conversational AI using GPT models.
---
openapi: 3.1.0
info:
  # Fixed: title previously had a duplicated "OpenAI APIs " prefix.
  title: OpenAI Chat Completions API
  description: >-
    API for conversational AI using GPT models. Creates model responses for
    chat conversations, supporting text and multimodal inputs with
    configurable parameters for temperature, token limits, and tool use.
  version: '1.0'
  contact:
    name: OpenAI Support
    # Quoted: an unquoted value starting with '[' is parsed as a flow sequence.
    email: '[email protected]'
    url: https://help.openai.com
  termsOfService: https://openai.com/policies/terms-of-use
externalDocs:
  description: OpenAI Chat Completions API Documentation
  url: https://platform.openai.com/docs/api-reference/chat
servers:
  - url: https://api.openai.com/v1
    description: OpenAI Production API
tags:
  - name: Chat
    description: Chat completion operations
# Global security: all operations require a Bearer API key unless overridden.
security:
  - bearerAuth: []
paths:
  /chat/completions:
    post:
      operationId: createChatCompletion
      # Fixed: summary previously had a stray "OpenAI APIs " prefix.
      summary: Create chat completion
      description: >-
        Creates a model response for the given chat conversation. Supports
        text, image, and audio inputs and can produce text, tool call, and
        structured JSON outputs.
      tags:
        - Chat
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/CreateChatCompletionRequest'
      responses:
        '200':
          description: Chat completion response
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ChatCompletionResponse'
        '400':
          description: Invalid request
        '401':
          description: Unauthorized - invalid or missing API key
        '429':
          description: Rate limit exceeded
        '500':
          description: Server error
components:
  securitySchemes:
    bearerAuth:
      type: http
      scheme: bearer
      bearerFormat: API Key
      description: OpenAI API key passed as a Bearer token
  schemas:
    # Request body for POST /chat/completions.
    CreateChatCompletionRequest:
      type: object
      required:
        - model
        - messages
      properties:
        model:
          type: string
          description: ID of the model to use (e.g., gpt-4o, gpt-4-turbo, gpt-3.5-turbo)
          examples:
            - gpt-4o
        messages:
          type: array
          description: A list of messages comprising the conversation so far
          items:
            $ref: '#/components/schemas/ChatMessage'
        temperature:
          type: number
          minimum: 0
          maximum: 2
          default: 1
          description: Sampling temperature between 0 and 2
        top_p:
          type: number
          minimum: 0
          maximum: 1
          default: 1
          description: Nucleus sampling parameter
        n:
          type: integer
          minimum: 1
          default: 1
          description: Number of chat completion choices to generate
        stream:
          type: boolean
          default: false
          description: Whether to stream partial message deltas
        stop:
          # Description sits at the property level: it documents `stop`
          # itself, not the nested array items.
          description: Up to 4 sequences where the API will stop generating
          oneOf:
            - type: string
            - type: array
              items:
                type: string
        max_tokens:
          type: integer
          # Superseded by max_completion_tokens in the upstream API.
          deprecated: true
          description: Maximum number of tokens to generate in the completion
        max_completion_tokens:
          type: integer
          description: Upper bound for tokens generated in the completion including reasoning tokens
        presence_penalty:
          type: number
          minimum: -2
          maximum: 2
          default: 0
          description: Penalize new tokens based on presence in text so far
        frequency_penalty:
          type: number
          minimum: -2
          maximum: 2
          default: 0
          description: Penalize new tokens based on frequency in text so far
        logit_bias:
          type: object
          additionalProperties:
            type: number
          description: Modify the likelihood of specified tokens appearing
        user:
          type: string
          description: A unique identifier representing your end-user
        tools:
          type: array
          items:
            $ref: '#/components/schemas/Tool'
          description: A list of tools the model may call
        tool_choice:
          description: Controls which tool is called by the model
          oneOf:
            - type: string
              enum:
                - none
                - auto
                - required
            - type: object
              properties:
                type:
                  type: string
                  enum:
                    - function
                function:
                  type: object
                  properties:
                    name:
                      type: string
        response_format:
          type: object
          description: Specifies the format that the model must output
          properties:
            type:
              type: string
              enum:
                - text
                - json_object
                - json_schema
        seed:
          type: integer
          description: Deterministic sampling seed for reproducible outputs
ChatMessage:
type: object
required:
- role
- content
properties:
role:
type: string
enum:
- system
- user
- assistant
- tool
description: The role of the message author
content:
oneOf:
- type: string
- type: array
items:
type: object
description: The contents of the message
name:
type: string
description: An optional name for the participant
tool_calls:
type: array
items:
$ref: '#/components/schemas/ToolCall'
description: Tool calls generated by the model
tool_call_id:
type: string
description: Tool call that this message is responding to
Tool:
type: object
required:
- type
- function
properties:
type:
type: string
enum:
- function
description: The type of the tool
function:
type: object
required:
- name
properties:
name:
type: string
description: The name of the function
description:
type: string
description: A description of what the function does
parameters:
type: object
description: The parameters the function accepts as JSON Schema
ToolCall:
type: object
properties:
id:
type: string
description: The ID of the tool call
type:
type: string
enum:
- function
function:
type: object
properties:
name:
type: string
description: The name of the function to call
arguments:
type: string
description: The arguments to call the function with as JSON
ChatCompletionResponse:
type: object
properties:
id:
type: string
description: A unique identifier for the chat completion
object:
type: string
enum:
- chat.completion
created:
type: integer
description: Unix timestamp of when the completion was created
model:
type: string
description: The model used for the completion
choices:
type: array
items:
type: object
properties:
index:
type: integer
message:
$ref: '#/components/schemas/ChatMessage'
finish_reason:
type: string
enum:
- stop
- length
- tool_calls
- content_filter
usage:
$ref: '#/components/schemas/Usage'
system_fingerprint:
type: string
description: Fingerprint of the backend configuration
Usage:
type: object
properties:
prompt_tokens:
type: integer
description: Number of tokens in the prompt
completion_tokens:
type: integer
description: Number of tokens in the generated completion
total_tokens:
type: integer
description: Total number of tokens used