1
基础对话
最简单的对话调用示例
Python
from openai import OpenAI

# Create a client pointed at the GetGoAPI OpenAI-compatible endpoint.
client = OpenAI(
    api_key="YOUR_GETGOAPI_KEY",
    base_url="https://api.getgoapi.com/v1",
)

# Single-turn chat: the system message sets the assistant's behavior,
# the user message carries the actual question.
response = client.chat.completions.create(
    model="grok-4-1-fast-reasoning",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is the capital of France?"},
    ],
)
print(response.choices[0].message.content)

Node.js
import OpenAI from 'openai';

// Client configured for the GetGoAPI OpenAI-compatible endpoint.
const client = new OpenAI({
  apiKey: 'YOUR_GETGOAPI_KEY',
  baseURL: 'https://api.getgoapi.com/v1'
});

// Single-turn chat: the system message defines behavior, the user message asks.
async function main() {
  const response = await client.chat.completions.create({
    model: 'grok-4-1-fast-reasoning',
    messages: [
      { role: 'system', content: 'You are a helpful assistant.' },
      { role: 'user', content: 'What is the capital of France?' }
    ]
  });
  console.log(response.choices[0].message.content);
}
main();

cURL
# Single-turn chat completion over raw HTTP; replace YOUR_GETGOAPI_KEY with a real key.
curl https://api.getgoapi.com/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_GETGOAPI_KEY" \
-d '{
"model": "grok-4-1-fast-reasoning",
"messages": [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "What is the capital of France?"}
]
}'

2
流式输出
实时流式获取AI响应
Python
from openai import OpenAI

# Client for the GetGoAPI OpenAI-compatible endpoint.
client = OpenAI(
    api_key="YOUR_GETGOAPI_KEY",
    base_url="https://api.getgoapi.com/v1",
)

# stream=True switches the API to server-sent events: tokens arrive
# incrementally as chunks instead of as one final response object.
stream = client.chat.completions.create(
    model="grok-4-1-fast-reasoning",
    messages=[{"role": "user", "content": "Write a short story"}],
    stream=True,
)
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end='')

Node.js
import OpenAI from 'openai';

// Client configured for the GetGoAPI OpenAI-compatible endpoint.
const client = new OpenAI({
  apiKey: 'YOUR_GETGOAPI_KEY',
  baseURL: 'https://api.getgoapi.com/v1'
});

async function main() {
  // stream: true yields an async iterable of incremental chunks.
  const stream = await client.chat.completions.create({
    model: 'grok-4-1-fast-reasoning',
    messages: [{ role: 'user', content: 'Write a short story' }],
    stream: true
  });
  // Print each token fragment as soon as it arrives.
  for await (const chunk of stream) {
    process.stdout.write(chunk.choices[0]?.delta?.content || '');
  }
}
main();

cURL
# Streaming chat completion: "stream": true makes the server send incremental SSE chunks.
curl https://api.getgoapi.com/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_GETGOAPI_KEY" \
-d '{
"model": "grok-4-1-fast-reasoning",
"messages": [{"role": "user", "content": "Write a short story"}],
"stream": true
}'

3
温度控制
调整输出的随机性
Python
from openai import OpenAI

# Client for the GetGoAPI OpenAI-compatible endpoint.
client = OpenAI(
    api_key="YOUR_GETGOAPI_KEY",
    base_url="https://api.getgoapi.com/v1",
)

# temperature=0: deterministic output (good for coding and math tasks).
response_deterministic = client.chat.completions.create(
    model="grok-4-1-fast-reasoning",
    messages=[{"role": "user", "content": "Write a Python function to sort a list"}],
    temperature=0,
)
# temperature=1: 创造性输出(适合写作、头脑风暴)
response_creative = client.chat.completions.create(
    model="grok-4-1-fast-reasoning",
    messages=[{"role": "user", "content": "Write a creative story"}],
    temperature=1
)

Node.js
import OpenAI from 'openai';

// Client configured for the GetGoAPI OpenAI-compatible endpoint.
const client = new OpenAI({
  apiKey: 'YOUR_GETGOAPI_KEY',
  baseURL: 'https://api.getgoapi.com/v1'
});

async function main() {
  // temperature: 0 -> deterministic output (coding, math).
  const deterministic = await client.chat.completions.create({
    model: 'grok-4-1-fast-reasoning',
    messages: [{ role: 'user', content: 'Write a Python function to sort a list' }],
    temperature: 0
  });
  // temperature: 1 -> more varied, creative output (writing, brainstorming).
  const creative = await client.chat.completions.create({
    model: 'grok-4-1-fast-reasoning',
    messages: [{ role: 'user', content: 'Write a creative story' }],
    temperature: 1
  });
}
main();

cURL
# Deterministic completion: "temperature": 0 minimizes sampling randomness.
curl https://api.getgoapi.com/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_GETGOAPI_KEY" \
-d '{
"model": "grok-4-1-fast-reasoning",
"messages": [{"role": "user", "content": "Write a Python function"}],
"temperature": 0
}'

4
限制输出长度
控制生成的token数量
Python
from openai import OpenAI

# Client for the GetGoAPI OpenAI-compatible endpoint.
client = OpenAI(
    api_key="YOUR_GETGOAPI_KEY",
    base_url="https://api.getgoapi.com/v1",
)

# max_tokens caps the length of the generated completion: generation
# stops once 100 output tokens have been produced.
response = client.chat.completions.create(
    model="grok-4-1-fast-reasoning",
    messages=[{"role": "user", "content": "Explain quantum computing"}],
    max_tokens=100,
)
print(response.choices[0].message.content)
print(f"Used tokens: {response.usage.total_tokens}")

Node.js
import OpenAI from 'openai';

// Client configured for the GetGoAPI OpenAI-compatible endpoint.
const client = new OpenAI({
  apiKey: 'YOUR_GETGOAPI_KEY',
  baseURL: 'https://api.getgoapi.com/v1'
});

async function main() {
  // max_tokens caps the completion at 100 generated tokens.
  const response = await client.chat.completions.create({
    model: 'grok-4-1-fast-reasoning',
    messages: [{ role: 'user', content: 'Explain quantum computing' }],
    max_tokens: 100
  });
  console.log(response.choices[0].message.content);
  // usage.total_tokens covers prompt tokens plus completion tokens.
  console.log(`Used tokens: ${response.usage.total_tokens}`);
}
main();

cURL
# Length-limited completion: "max_tokens": 100 caps generated output tokens.
curl https://api.getgoapi.com/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_GETGOAPI_KEY" \
-d '{
"model": "grok-4-1-fast-reasoning",
"messages": [{"role": "user", "content": "Explain quantum computing"}],
"max_tokens": 100
}'

5
系统提示词
定制AI的行为和角色
Python
from openai import OpenAI

# Client for the GetGoAPI OpenAI-compatible endpoint.
client = OpenAI(
    api_key="YOUR_GETGOAPI_KEY",
    base_url="https://api.getgoapi.com/v1",
)

# The system message pins down the assistant's role and answer style;
# the user message then asks within that persona.
response = client.chat.completions.create(
    model="grok-4-1-fast-reasoning",
    messages=[
        {
            "role": "system",
            "content": "You are a professional Python developer. Always provide code with comments and best practices.",
        },
        {
            "role": "user",
            "content": "Write a function to calculate Fibonacci sequence",
        },
    ],
)
print(response.choices[0].message.content)

Node.js
import OpenAI from 'openai';

// Client configured for the GetGoAPI OpenAI-compatible endpoint.
const client = new OpenAI({
  apiKey: 'YOUR_GETGOAPI_KEY',
  baseURL: 'https://api.getgoapi.com/v1'
});

async function main() {
  const response = await client.chat.completions.create({
    model: 'grok-4-1-fast-reasoning',
    messages: [
      // The system message fixes the assistant's persona before the user asks.
      {
        role: 'system',
        content: 'You are a professional Python developer. Always provide code with comments.'
      },
      {
        role: 'user',
        content: 'Write a function to calculate Fibonacci sequence'
      }
    ]
  });
  console.log(response.choices[0].message.content);
}
main();

cURL
# Chat completion with a system message that fixes the assistant's role.
curl https://api.getgoapi.com/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_GETGOAPI_KEY" \
-d '{
"model": "grok-4-1-fast-reasoning",
"messages": [
{
"role": "system",
"content": "You are a professional Python developer."
},
{
"role": "user",
"content": "Write a function to calculate Fibonacci sequence"
}
]
}'

最佳实践
妥善保管API Key
不要在代码中硬编码API Key,使用环境变量或密钥管理服务。
错误处理
始终添加try-catch处理API调用异常,提供友好的错误提示。
控制Token使用
使用max_tokens限制输出长度,避免不必要的费用。
优化提示词
清晰简洁的提示词可以获得更好的回答,并节省token。