Update to latest GPT model

This commit is contained in:
2025-02-01 22:57:23 -07:00
parent 91da21defb
commit b926215e94
6 changed files with 72 additions and 20 deletions

View File

@ -12,11 +12,17 @@ import (
)
const encodingName = "gpt-4"
const model = openai.GPT4TurboPreview
const maxTokens = 4096
// const model = openai.GPT4Turbo
const model = "o3-mini"
// const model = openai.GPT4TurboPreview
const maxTokens = 100000
//const maxTokens = 4096
// const maxTokens = 128000
const temperature = 0.3
const temperature = 0.7
func GetTokenCount(input string) (int, error) {
tke, err := tiktoken.EncodingForModel(encodingName) // cached in "TIKTOKEN_CACHE_DIR"
@ -59,10 +65,10 @@ func singlePromptInteraction(systemPrompt, prompt string, retries int) (openai.C
resp, err := client.CreateChatCompletion(
context.Background(),
openai.ChatCompletionRequest{
Model: model, // switch to the configured Model
Messages: messages,
MaxTokens: messageTokenSize, // might want to think about how to reduce this
Temperature: temperature,
Model: model, // switch to the configured Model
Messages: messages,
MaxCompletionTokens: messageTokenSize, // might want to think about how to reduce this
//Temperature: temperature,
},
)
if err != nil {
@ -100,7 +106,12 @@ func SendPrompt(messages []openai.ChatCompletionMessage) (openai.ChatCompletionR
}
func sendPrompt(messages []openai.ChatCompletionMessage, retries int) (openai.ChatCompletionResponse, error) {
client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
config := openai.DefaultConfig(os.Getenv("OPENAI_API_KEY"))
if os.Getenv("OPENAI_BASE_URL") != "" {
config.BaseURL = os.Getenv("OPENAI_BASE_URL")
}
client := openai.NewClientWithConfig(config)
previousTokenCount, err := GetPreviousTokenUsage(messages)
if err != nil {
@ -113,10 +124,10 @@ func sendPrompt(messages []openai.ChatCompletionMessage, retries int) (openai.Ch
resp, err := client.CreateChatCompletion(
context.Background(),
openai.ChatCompletionRequest{
Model: model, // switch to the configured Model
Messages: messages,
MaxTokens: messageTokenSize,
Temperature: temperature,
Model: model, // switch to the configured Model
Messages: messages,
MaxCompletionTokens: messageTokenSize,
//Temperature: temperature,
},
)
if err != nil {