LLM Wrapper
Basic Implementation

A minimal wrapper around the OpenAI chat completions API, shown in both Python and TypeScript:
```python
# utils/call_llm.py
import os

from openai import OpenAI

def call_llm(prompt, model="gpt-4o", temperature=0.7):
    """Simple wrapper for calling OpenAI's API."""
    client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        temperature=temperature,
    )
    return response.choices[0].message.content
```
```typescript
// utils/callLLM.ts
import OpenAI from 'openai'

export async function callLLM(
  prompt: string,
  model: string = 'gpt-4o',
  temperature: number = 0.7
): Promise<string> {
  const openai = new OpenAI({
    apiKey: process.env.OPENAI_API_KEY,
  })
  const response = await openai.chat.completions.create({
    model,
    messages: [{ role: 'user', content: prompt }],
    temperature,
  })
  return response.choices[0]?.message?.content || ''
}
```
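Either wrapper can then be called anywhere in a project. For example, in Python (assuming `OPENAI_API_KEY` is set in the environment):

```python
from utils.call_llm import call_llm

# Quick smoke test; requires OPENAI_API_KEY in the environment.
print(call_llm("In one sentence, explain what an LLM wrapper is."))
```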
Why Implement Your Own?

A wrapper this small is easy to own outright: you control model selection, prompt formatting, and error handling directly, instead of depending on a built-in abstraction that may lag behind vendor API changes.
Integration with Caskada
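As a sketch of how the wrapper might plug into a flow, assuming Caskada follows a PocketFlow-style node lifecycle of prep/exec/post (the `Node` import path and hook names below are assumptions, not confirmed Caskada API), the LLM call would typically live in a node's exec step:

```python
# Hypothetical sketch: assumes Caskada exposes a PocketFlow-style Node
# with prep/exec/post lifecycle hooks. The import path, class, and method
# names here are assumptions, not confirmed Caskada API.
from caskada import Node  # assumed import path

from utils.call_llm import call_llm

class SummarizeNode(Node):
    def prep(self, shared):
        # Pull the text to summarize out of the shared store.
        return shared["text"]

    def exec(self, text):
        # The node's work is a single wrapper call.
        return call_llm(f"Summarize in one paragraph:\n\n{text}")

    def post(self, shared, prep_res, exec_res):
        # Store the summary for downstream nodes.
        shared["summary"] = exec_res
```

Keeping the API call inside exec leaves prep and post free of I/O, so retry logic can be scoped to the LLM call alone.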
Additional Considerations
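Production wrappers usually grow retries, rate limiting, caching, and logging. As one example, here is a minimal sketch of exponential-backoff retries around `call_llm`, using only the standard library; the retry count and delays are illustrative, not prescribed values:

```python
import time

from utils.call_llm import call_llm

def call_llm_with_retry(prompt, retries=3, base_delay=1.0, **kwargs):
    """Retry call_llm with exponential backoff (illustrative parameters)."""
    for attempt in range(retries):
        try:
            return call_llm(prompt, **kwargs)
        except Exception:
            if attempt == retries - 1:
                raise  # Out of attempts: surface the original error.
            # Back off 1s, 2s, 4s, ... before retrying.
            time.sleep(base_delay * (2 ** attempt))
```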