add single prompt call

2023-12-21 16:53:31 -07:00
parent 02583c7e0e
commit 9810ffdeb8
3 changed files with 51 additions and 0 deletions

main.go

@@ -5,12 +5,17 @@ import (
"context"
"ctxGPT/database"
"fmt"
"os"
"strings"
"text/template"
"time"
"github.com/pkoukk/tiktoken-go"
"github.com/sashabaranov/go-openai"
)
const encodingName = "gpt-4"
const model = openai.GPT4TurboPreview
func main() {
@@ -67,3 +72,46 @@ func GetTokenCount(input string) (int, error) {
token := tke.Encode(input, nil, nil)
return len(token), nil
}
// SinglePromptInteraction calls the OpenAI chat completion endpoint with just a system prompt and a user prompt and returns the response
func SinglePromptInteraction(systemPrompt, prompt string) (openai.ChatCompletionResponse, error) {
return singlePromptInteraction(systemPrompt, prompt, 5)
}
// singlePromptInteraction calls the OpenAI chat completion endpoint with just a system prompt and a user prompt and returns the response
// it retries up to the given number of times, spaced 5 seconds apart, when the API returns rate limiting (429) errors
func singlePromptInteraction(systemPrompt, prompt string, retries int) (openai.ChatCompletionResponse, error) {
client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
messages := []openai.ChatCompletionMessage{
{
Role: openai.ChatMessageRoleSystem,
Content: systemPrompt,
},
{
Role: openai.ChatMessageRoleUser,
Content: prompt,
},
}
resp, err := client.CreateChatCompletion(
context.Background(),
openai.ChatCompletionRequest{
Model: model, // use the configured model constant
Messages: messages,
MaxTokens: 256,
Temperature: 0,
},
)
if err != nil {
// if 429, wait and try again
if strings.Contains(err.Error(), "429") && retries > 0 {
fmt.Println("429 error, waiting 5 seconds...")
time.Sleep(5 * time.Second)
return singlePromptInteraction(systemPrompt, prompt, retries-1) // retry with one fewer attempt remaining; stops once retries reaches 0
}
return openai.ChatCompletionResponse{}, fmt.Errorf("ChatCompletion request error: %w", err)
}
return resp, nil
}
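
A rough usage sketch (not part of this commit) of how a caller might use the exported helper: the askOnce function name and the system prompt are illustrative assumptions; the response fields are the standard go-openai ChatCompletionResponse structure.

// askOnce is a hypothetical caller demonstrating how to read the first
// choice from the ChatCompletionResponse returned by SinglePromptInteraction.
func askOnce(question string) (string, error) {
	resp, err := SinglePromptInteraction("You are a helpful assistant.", question)
	if err != nil {
		return "", err
	}
	if len(resp.Choices) == 0 {
		return "", fmt.Errorf("no choices returned")
	}
	return resp.Choices[0].Message.Content, nil
}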