OpenAI - Response API
Usage
LiteLLM Python SDK
Non-streaming
OpenAI Non-streaming Response
import litellm

# Non-streaming response
response = litellm.responses(
    model="openai/o1-pro",
    input="Tell me a three sentence bedtime story about a unicorn.",
    max_output_tokens=100
)

print(response)
Streaming
OpenAI Streaming Response
import litellm

# Streaming response
response = litellm.responses(
    model="openai/o1-pro",
    input="Tell me a three sentence bedtime story about a unicorn.",
    stream=True
)

for event in response:
    print(event)
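Each streamed item is an event object rather than raw text. If you only want the generated text, you can filter on the event type. A minimal sketch, assuming the events follow OpenAI's Responses streaming format (a `type` field plus a `delta` payload on text-delta events):

import litellm

# Stream and collect only the text deltas (event attribute names assume
# OpenAI-style Responses streaming events).
response = litellm.responses(
    model="openai/o1-pro",
    input="Tell me a three sentence bedtime story about a unicorn.",
    stream=True
)

text_parts = []
for event in response:
    if getattr(event, "type", None) == "response.output_text.delta":
        text_parts.append(event.delta)   # incremental piece of the story

print("".join(text_parts))               # full generated text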
Get a Response
Get Response by ID
import litellm

# First, create a response
response = litellm.responses(
    model="openai/o1-pro",
    input="Tell me a three sentence bedtime story about a unicorn.",
    max_output_tokens=100
)

# Get the response ID
response_id = response.id

# Retrieve the response by ID
retrieved_response = litellm.get_responses(
    response_id=response_id
)

print(retrieved_response)

# For async usage
# retrieved_response = await litellm.aget_responses(response_id=response_id)
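The async variants can be combined inside a single coroutine. A minimal sketch, assuming `litellm.aresponses` is the async counterpart of `litellm.responses`, just as `aget_responses` is for `get_responses`:

import asyncio
import litellm

async def main():
    # Create a response asynchronously (aresponses assumed to mirror responses)
    response = await litellm.aresponses(
        model="openai/o1-pro",
        input="Tell me a three sentence bedtime story about a unicorn.",
        max_output_tokens=100
    )

    # Retrieve it again by ID using the async getter shown above
    retrieved_response = await litellm.aget_responses(response_id=response.id)
    print(retrieved_response)

asyncio.run(main())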
Delete a Response
Delete Response by ID
import litellm

# First, create a response
response = litellm.responses(
    model="openai/o1-pro",
    input="Tell me a three sentence bedtime story about a unicorn.",
    max_output_tokens=100
)

# Get the response ID
response_id = response.id

# Delete the response by ID
delete_response = litellm.delete_responses(
    response_id=response_id
)

print(delete_response)

# For async usage
# delete_response = await litellm.adelete_responses(response_id=response_id)
LiteLLM Proxy with OpenAI SDK
- Set up config.yaml
OpenAI Proxy Configuration
model_list:
  - model_name: openai/o1-pro
    litellm_params:
      model: openai/o1-pro
      api_key: os.environ/OPENAI_API_KEY
- Start the LiteLLM Proxy Server
Start LiteLLM Proxy Server
litellm --config /path/to/config.yaml
# RUNNING on http://0.0.0.0:4000
- Use the OpenAI SDK with LiteLLM Proxy
Non-streaming
OpenAI Proxy Non-streaming Response
from openai import OpenAI

# Initialize client with your proxy URL
client = OpenAI(
    base_url="http://localhost:4000",  # Your proxy URL
    api_key="your-api-key"             # Your proxy API key
)

# Non-streaming response
response = client.responses.create(
    model="openai/o1-pro",
    input="Tell me a three sentence bedtime story about a unicorn."
)

print(response)
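Because the request goes through the official OpenAI SDK, the return value is the SDK's typed `Response` object, so you can read the generated text directly instead of printing the whole object. Continuing from the example above:

# Convenience accessor on the OpenAI SDK response object
print(response.output_text)

# Or walk the structured output items yourself
for item in response.output:
    print(item.type)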
Streaming
OpenAI Proxy Streaming Response
from openai import OpenAI

# Initialize client with your proxy URL
client = OpenAI(
    base_url="http://localhost:4000",  # Your proxy URL
    api_key="your-api-key"             # Your proxy API key
)

# Streaming response
response = client.responses.create(
    model="openai/o1-pro",
    input="Tell me a three sentence bedtime story about a unicorn.",
    stream=True
)

for event in response:
    print(event)
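Newer releases of the OpenAI Python SDK also ship a `client.responses.stream()` helper that wraps this event loop. A sketch, assuming your installed SDK version includes the helper and reusing the `client` configured above:

# Stream via the SDK helper and print text deltas as they arrive
with client.responses.stream(
    model="openai/o1-pro",
    input="Tell me a three sentence bedtime story about a unicorn.",
) as stream:
    for event in stream:
        if event.type == "response.output_text.delta":
            print(event.delta, end="", flush=True)
    print()  # final newline after the story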
Get a Response
Get Response by ID with OpenAI SDK
from openai import OpenAI

# Initialize client with your proxy URL
client = OpenAI(
    base_url="http://localhost:4000",  # Your proxy URL
    api_key="your-api-key"             # Your proxy API key
)

# First, create a response
response = client.responses.create(
    model="openai/o1-pro",
    input="Tell me a three sentence bedtime story about a unicorn."
)

# Get the response ID
response_id = response.id

# Retrieve the response by ID
retrieved_response = client.responses.retrieve(response_id)

print(retrieved_response)
Delete a Response
Delete Response by ID with OpenAI SDK
from openai import OpenAI

# Initialize client with your proxy URL
client = OpenAI(
    base_url="http://localhost:4000",  # Your proxy URL
    api_key="your-api-key"             # Your proxy API key
)

# First, create a response
response = client.responses.create(
    model="openai/o1-pro",
    input="Tell me a three sentence bedtime story about a unicorn."
)

# Get the response ID
response_id = response.id

# Delete the response by ID
delete_response = client.responses.delete(response_id)

print(delete_response)
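The proxy also exposes an OpenAI-compatible HTTP route, so you can call it without any SDK. A minimal sketch using `requests`, assuming the default `/v1/responses` path on the proxy started above:

import requests

response = requests.post(
    "http://localhost:4000/v1/responses",  # proxy URL (assumed default)
    headers={"Authorization": "Bearer your-api-key"},
    json={
        "model": "openai/o1-pro",
        "input": "Tell me a three sentence bedtime story about a unicorn.",
    },
    timeout=60,
)
print(response.json())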
Supported Responses API Parameters

| Provider | Supported Parameters |
|---|---|
| openai | All Responses API parameters are supported |
Computer Use
- LiteLLM Python SDK
- LiteLLM Proxy
import litellm

# Non-streaming response
response = litellm.responses(
    model="computer-use-preview",
    tools=[{
        "type": "computer_use_preview",
        "display_width": 1024,
        "display_height": 768,
        "environment": "browser"  # other possible values: "mac", "windows", "ubuntu"
    }],
    input=[
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Check the latest OpenAI news on bing.com."
                }
                # Optional: include a screenshot of the initial state of the environment
                # {
                #     "type": "input_image",
                #     "image_url": f"data:image/png;base64,{screenshot_base64}"
                # }
            ]
        }
    ],
    reasoning={
        "summary": "concise",
    },
    truncation="auto"
)

print(response.output)
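The items to look for in `response.output` are the `computer_call` entries, which describe the action the model wants performed next. A minimal sketch for extracting them from the response above, assuming the output items follow OpenAI's computer use format (a `type` field, an `action` payload, and a `call_id` to echo back with the result):

# Pull out the proposed computer actions from the output items
computer_calls = [item for item in response.output if item.type == "computer_call"]

for call in computer_calls:
    print(call.action)   # e.g. a click, type, or screenshot action to execute
    print(call.call_id)  # send this back with the action's result/screenshot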
- Set up config.yaml
OpenAI Proxy Configuration
model_list:
  - model_name: openai/o1-pro
    litellm_params:
      model: openai/o1-pro
      api_key: os.environ/OPENAI_API_KEY
- Start the LiteLLM Proxy Server
Start LiteLLM Proxy Server
litellm --config /path/to/config.yaml
# RUNNING on http://0.0.0.0:4000
- Test it!
OpenAI Proxy Non-streaming Response
from openai import OpenAI

# Initialize client with your proxy URL
client = OpenAI(
    base_url="http://localhost:4000",  # Your proxy URL
    api_key="your-api-key"             # Your proxy API key
)

# Non-streaming response
response = client.responses.create(
    model="computer-use-preview",
    tools=[{
        "type": "computer_use_preview",
        "display_width": 1024,
        "display_height": 768,
        "environment": "browser"  # other possible values: "mac", "windows", "ubuntu"
    }],
    input=[
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Check the latest OpenAI news on bing.com."
                }
                # Optional: include a screenshot of the initial state of the environment
                # {
                #     "type": "input_image",
                #     "image_url": f"data:image/png;base64,{screenshot_base64}"
                # }
            ]
        }
    ],
    reasoning={
        "summary": "concise",
    },
    truncation="auto"
)

print(response)