# Install the VLM Run and OpenAI Python packages (the example below uses the OpenAI SDK)
!pip install vlmrun openai

import openai

# Initialize the OpenAI client, pointing it at the VLM Run agent endpoint
client = openai.OpenAI(base_url="https://agent.vlm.run/v1", api_key="<VLMRUN_API_KEY>")

# Create a chat completion with the VLM Run agent model
response = client.chat.completions.create(
  model="vlm-agent-1",
  messages=[{"role": "user", "content": "Who are you and what can you do?"}],
  temperature=0.7,
)
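
To inspect the agent's answer, read the message content from the returned completion. A minimal sketch, assuming the endpoint returns a standard OpenAI-compatible ChatCompletion object:

# Print the assistant's reply (standard OpenAI SDK response shape)
print(response.choices[0].message.content)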