add single prompt call
go.mod (1 addition)
@@ -4,6 +4,7 @@ go 1.19

 require (
 	github.com/pkoukk/tiktoken-go v0.1.6
+	github.com/sashabaranov/go-openai v1.17.9
 	zombiezen.com/go/sqlite v0.13.1
 )
go.sum (2 additions)
@@ -14,6 +14,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
 github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/sashabaranov/go-openai v1.17.9 h1:QEoBiGKWW68W79YIfXWEFZ7l5cEgZBV4/Ow3uy+5hNY=
+github.com/sashabaranov/go-openai v1.17.9/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
 github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
main.go (48 additions)
@@ -5,12 +5,17 @@ import (
 	"context"
 	"ctxGPT/database"
 	"fmt"
+	"os"
+	"strings"
 	"text/template"
+	"time"

 	"github.com/pkoukk/tiktoken-go"
+	"github.com/sashabaranov/go-openai"
 )

 const encodingName = "gpt-4"
+const model = openai.GPT4TurboPreview

 func main() {

@@ -67,3 +72,46 @@ func GetTokenCount(input string) (int, error) {
 	token := tke.Encode(input, nil, nil)
 	return len(token), nil
 }
+
+// SinglePromptInteraction calls the OpenAI chat endpoint with just a system prompt and a user prompt and returns the response.
+func SinglePromptInteraction(systemPrompt, prompt string) (openai.ChatCompletionResponse, error) {
+	return singlePromptInteraction(systemPrompt, prompt, 5)
+}
+
+// singlePromptInteraction calls the OpenAI chat endpoint with just a system prompt and a user prompt and returns the response.
+// On rate-limit (429) errors it retries up to `retries` times, waiting 5 seconds between attempts.
+func singlePromptInteraction(systemPrompt, prompt string, retries int) (openai.ChatCompletionResponse, error) {
+
+	client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
+	messages := []openai.ChatCompletionMessage{
+		{
+			Role:    openai.ChatMessageRoleSystem,
+			Content: systemPrompt,
+		},
+		{
+			Role:    openai.ChatMessageRoleUser,
+			Content: prompt,
+		},
+	}
+
+	resp, err := client.CreateChatCompletion(
+		context.Background(),
+		openai.ChatCompletionRequest{
+			Model:       model, // use the configured model constant
+			Messages:    messages,
+			MaxTokens:   256,
+			Temperature: 0,
+		},
+	)
+	if err != nil {
+		// on a 429 (rate limit) error, wait 5 seconds and retry, decrementing retries so the recursion terminates
+		if strings.Contains(err.Error(), "429") && retries > 0 {
+			fmt.Println("429 error, waiting 5 seconds...")
+			time.Sleep(5 * time.Second)
+			return singlePromptInteraction(systemPrompt, prompt, retries-1)
+		}
+		return openai.ChatCompletionResponse{}, fmt.Errorf("ChatCompletion request error: %w", err)
+	}
+
+	return resp, nil
+}
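
For context, a minimal sketch of how the new helper might be invoked from the same package; the prompt strings and the surrounding example function are illustrative assumptions, not part of this commit:

// exampleSinglePrompt is a hypothetical caller, shown only to illustrate usage.
// It assumes OPENAI_API_KEY is set in the environment, as SinglePromptInteraction requires.
func exampleSinglePrompt() {
	resp, err := SinglePromptInteraction(
		"You are a concise assistant.",                       // assumed system prompt, illustration only
		"Explain what a context window is in one sentence.",  // assumed user prompt, illustration only
	)
	if err != nil {
		fmt.Println("single prompt call failed:", err)
		return
	}
	// go-openai returns the assistant reply in the first choice of the response
	fmt.Println(resp.Choices[0].Message.Content)
}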