new version for apigo.cc/ai

commit f7fbd20ffa (parent 998a9f4ad5)

ai_test.go (new file, 20 lines)
@@ -0,0 +1,20 @@
+package openai_test
+
+import (
+	"testing"
+
+	_ "apigo.cc/ai"
+	_ "apigo.cc/ai/openai"
+	"apigo.cc/gojs"
+	_ "apigo.cc/gojs/console"
+	"github.com/ssgo/u"
+)
+
+func TestAI(t *testing.T) {
+	gojs.ExportForDev()
+	r, err := gojs.RunFile("ai_test.js")
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+	println(u.Cyan(u.JsonP(r)))
+}

ai_test.js (new file, 6 lines)
@@ -0,0 +1,6 @@
+import co from 'apigo.cc/gojs/console'
+import ai from 'apigo.cc/ai'
+
+return ai.openai.fastAsk('用一句话介绍一下你的主人', co.info, {
+	systemPrompt: '你的主人叫张三,是个程序员'
+})
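
Note on the two new test files: the Go test apparently exports the registered gojs modules for development, runs ai_test.js in the gojs runtime, and prints the script's return value. The script streams the answer to co.info; its Chinese prompt translates to "introduce your master in one sentence", and the systemPrompt to "your master is called Zhang San; he is a programmer". The method name fastAsk is not defined anywhere in the Go sources below; it appears to be resolved from the chat.fastAsk preset in default.yml, also added in this commit.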

chat.go (172 changed lines)
@@ -1,135 +1,101 @@
 package openai
 
 import (
-	"apigo.cc/ai/llm/llm"
 	"bytes"
 	"context"
 	"encoding/binary"
 	"fmt"
+	"strings"
+	"time"
+
+	"apigo.cc/ai"
 	"github.com/sashabaranov/go-openai"
 	"github.com/ssgo/log"
 	"github.com/ssgo/u"
-	"strings"
-	"time"
 )
 
-func (lm *LLM) FastAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
-	return lm.Ask(messages, llm.ChatConfig{
-		Model: ModelGPT_4o_mini_2024_07_18,
-	}, callback)
-}
-
-func (lm *LLM) LongAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
-	return lm.Ask(messages, llm.ChatConfig{
-		Model: ModelGPT_4_32k_0613,
-	}, callback)
-}
-
-func (lm *LLM) BatterAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
-	return lm.Ask(messages, llm.ChatConfig{
-		Model: ModelGPT_4_turbo,
-	}, callback)
-}
-
-func (lm *LLM) BestAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
-	return lm.Ask(messages, llm.ChatConfig{
-		Model: ModelGPT_4o_2024_08_06,
-	}, callback)
-}
-
-func (lm *LLM) MultiAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
-	return lm.Ask(messages, llm.ChatConfig{
-		Model: ModelGPT_4o_mini_2024_07_18,
-	}, callback)
-}
-
-func (lm *LLM) BestMultiAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
-	return lm.Ask(messages, llm.ChatConfig{
-		Model: ModelGPT_4o_2024_08_06,
-	}, callback)
-}
-
-func (lm *LLM) CodeInterpreterAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
-	return lm.Ask(messages, llm.ChatConfig{
-		Model: ModelGPT_4o,
-		Tools: map[string]any{llm.ToolCodeInterpreter: nil},
-	}, callback)
-}
-
-func (lm *LLM) WebSearchAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
-	return lm.Ask(messages, llm.ChatConfig{
-		Model: ModelGPT_4o_mini_2024_07_18,
-		Tools: map[string]any{llm.ToolWebSearch: nil},
-	}, callback)
-}
-
-func (lm *LLM) Ask(messages []llm.ChatMessage, config llm.ChatConfig, callback func(answer string)) (string, llm.Usage, error) {
-	openaiConf := openai.DefaultConfig(lm.config.ApiKey)
-	if lm.config.Endpoint != "" {
-		openaiConf.BaseURL = lm.config.Endpoint
+func getClient(aiConf *ai.AIConfig) *openai.Client {
+	openaiConf := openai.DefaultConfig(aiConf.ApiKey)
+	if aiConf.Endpoint != "" {
+		openaiConf.BaseURL = aiConf.Endpoint
 	}
+	return openai.NewClientWithConfig(openaiConf)
+}
 
-	config.SetDefault(&lm.config.ChatConfig)
-
-	agentMessages := make([]openai.ChatCompletionMessage, len(messages))
+// func (lm *LLM) Ask(messages []ai.ChatMessage, config ai.ChatConfig, callback func(answer string)) (string, ai.Usage, error) {
+func Chat(aiConf *ai.AIConfig, messages []ai.ChatMessage, callback func(string), conf ai.ChatConfig) (ai.ChatResult, error) {
+	chatMessages := make([]openai.ChatCompletionMessage, len(messages))
 	for i, msg := range messages {
 		var contents []openai.ChatMessagePart
 		if msg.Contents != nil {
 			contents = make([]openai.ChatMessagePart, len(msg.Contents))
 			for j, inPart := range msg.Contents {
 				part := openai.ChatMessagePart{}
-				part.Type = TypeMap[inPart.Type]
 				switch inPart.Type {
-				case llm.TypeText:
+				case ai.TypeText:
+					part.Type = openai.ChatMessagePartTypeText
 					part.Text = inPart.Content
-				case llm.TypeImage:
+				case ai.TypeImage:
+					part.Type = openai.ChatMessagePartTypeImageURL
 					part.ImageURL = &openai.ChatMessageImageURL{
 						URL:    inPart.Content,
 						Detail: openai.ImageURLDetailAuto,
 					}
+				default:
+					part.Type = openai.ChatMessagePartType(inPart.Type)
+					part.Text = inPart.Content
 				}
 				contents[j] = part
 			}
 		}
-		if len(contents) == 1 && contents[0].Type == llm.TypeText {
-			agentMessages[i] = openai.ChatCompletionMessage{
-				Role:    RoleMap[msg.Role],
+		if len(contents) == 1 && contents[0].Type == ai.TypeText {
+			chatMessages[i] = openai.ChatCompletionMessage{
+				Role:    msg.Role,
 				Content: contents[0].Text,
 			}
 		} else {
-			agentMessages[i] = openai.ChatCompletionMessage{
-				Role:         RoleMap[msg.Role],
+			chatMessages[i] = openai.ChatCompletionMessage{
+				Role:         msg.Role,
 				MultiContent: contents,
 			}
 		}
 	}
 
+	if conf.SystemPrompt != "" {
+		chatMessages = append([]openai.ChatCompletionMessage{{
+			Role:    openai.ChatMessageRoleSystem,
+			Content: conf.SystemPrompt,
+		}}, chatMessages...)
+	}
+
 	opt := openai.ChatCompletionRequest{
-		Model:       config.GetModel(),
-		Messages:    agentMessages,
-		MaxTokens:   config.GetMaxTokens(),
-		Temperature: float32(config.GetTemperature()),
-		TopP:        float32(config.GetTopP()),
+		Model:       conf.Model,
+		Messages:    chatMessages,
+		MaxTokens:   conf.MaxTokens,
+		Temperature: float32(conf.Temperature),
+		TopP:        float32(conf.TopP),
 		StreamOptions: &openai.StreamOptions{
 			IncludeUsage: true,
 		},
 	}
 
-	for name := range config.GetTools() {
+	for name, toolConf := range conf.Tools {
 		switch name {
-		case llm.ToolCodeInterpreter:
+		case ai.ToolCodeInterpreter:
 			opt.Tools = append(opt.Tools, openai.Tool{Type: "code_interpreter"})
-		case llm.ToolWebSearch:
+		case ai.ToolFunction:
+			conf := openai.FunctionDefinition{}
+			u.Convert(toolConf, &conf)
+			opt.Tools = append(opt.Tools, openai.Tool{Type: openai.ToolTypeFunction, Function: &conf})
 		}
 	}
 
-	c := openai.NewClientWithConfig(openaiConf)
+	c := getClient(aiConf)
 	if callback != nil {
 		opt.Stream = true
 		r, err := c.CreateChatCompletionStream(context.Background(), opt)
 		if err == nil {
 			results := make([]string, 0)
-			usage := llm.Usage{}
+			out := ai.ChatResult{}
 			for {
 				if r2, err := r.Recv(); err == nil {
 					if r2.Choices != nil {
@@ -140,19 +106,20 @@ func (lm *LLM) Ask(messages []llm.ChatMessage, config llm.ChatConfig, callback f
 					}
 				}
 				if r2.Usage != nil {
-					usage.AskTokens += int64(r2.Usage.PromptTokens)
-					usage.AnswerTokens += int64(r2.Usage.CompletionTokens)
-					usage.TotalTokens += int64(r2.Usage.TotalTokens)
+					out.AskTokens += int64(r2.Usage.PromptTokens)
+					out.AnswerTokens += int64(r2.Usage.CompletionTokens)
+					out.TotalTokens += int64(r2.Usage.TotalTokens)
 				}
 			} else {
 				break
 			}
 		}
 		_ = r.Close()
-		return strings.Join(results, ""), usage, nil
+		out.Result = strings.Join(results, "")
+		return out, nil
 	} else {
 		log.DefaultLogger.Error(err.Error())
-		return "", llm.Usage{}, err
+		return ai.ChatResult{}, err
 	}
 } else {
 	t1 := time.Now().UnixMilli()
@@ -164,47 +131,29 @@ func (lm *LLM) Ask(messages []llm.ChatMessage, config llm.ChatConfig, callback f
 			results = append(results, ch.Message.Content)
 		}
 	}
-		return strings.Join(results, ""), llm.Usage{
+		return ai.ChatResult{
+			Result:       strings.Join(results, ""),
 			AskTokens:    int64(r.Usage.PromptTokens),
 			AnswerTokens: int64(r.Usage.CompletionTokens),
 			TotalTokens:  int64(r.Usage.TotalTokens),
 			UsedTime:     t2,
 		}, nil
 	} else {
-		//fmt.Println(u.BMagenta(err.Error()), u.BMagenta(u.JsonP(r)))
-		return "", llm.Usage{}, err
+		return ai.ChatResult{}, err
 	}
 	}
 }
 
-func (lm *LLM) FastEmbedding(text string) ([]byte, llm.Usage, error) {
-	return lm.Embedding(text, string(openai.AdaEmbeddingV2))
-}
-
-func (lm *LLM) BestEmbedding(text string) ([]byte, llm.Usage, error) {
-	return lm.Embedding(text, string(openai.LargeEmbedding3))
-}
-
-func (lm *LLM) Embedding(text, model string) ([]byte, llm.Usage, error) {
-	fmt.Println(111, model, text)
-	openaiConf := openai.DefaultConfig(lm.config.ApiKey)
-	if lm.config.Endpoint != "" {
-		openaiConf.BaseURL = lm.config.Endpoint
-	}
-
-	c := openai.NewClientWithConfig(openaiConf)
+func Embedding(aiConf *ai.AIConfig, text string, embeddingConf ai.EmbeddingConfig) (ai.EmbeddingResult, error) {
+	c := getClient(aiConf)
 	req := openai.EmbeddingRequest{
 		Input:          text,
-		Model:          openai.EmbeddingModel(model),
+		Model:          openai.EmbeddingModel(embeddingConf.Model),
 		User:           "",
 		EncodingFormat: "",
 		Dimensions:     0,
 	}
 
-	if lm.config.Debug {
-		fmt.Println(u.JsonP(req))
-	}
-
 	t1 := time.Now().UnixMilli()
 	if r, err := c.CreateEmbeddings(context.Background(), req); err == nil {
 		t2 := time.Now().UnixMilli() - t1
@@ -216,15 +165,14 @@ func (lm *LLM) Embedding(text, model string) ([]byte, llm.Usage, error) {
 			}
 		}
 	}
-	fmt.Println(len(buf.Bytes()))
-	return buf.Bytes(), llm.Usage{
+	return ai.EmbeddingResult{
+		Result:       buf.Bytes(),
 		AskTokens:    int64(r.Usage.PromptTokens),
 		AnswerTokens: int64(r.Usage.CompletionTokens),
 		TotalTokens:  int64(r.Usage.TotalTokens),
 		UsedTime:     t2,
 	}, nil
 	} else {
-		fmt.Println(err.Error())
-		return nil, llm.Usage{}, err
+		return ai.EmbeddingResult{}, err
 	}
 }
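
For reference, a minimal Go-side caller for the new Chat function, sketched only from the signatures visible in this diff; it is not part of the commit. The part type name ai.ChatMessagePart is a guess (the diff only shows that a message carries Role plus Contents entries with Type and Content fields), so treat this as an illustration rather than the library's documented API.

package main

import (
	"fmt"

	"apigo.cc/ai"
	"apigo.cc/ai/openai"
)

func main() {
	// Leaving Endpoint empty keeps go-openai's default base URL.
	conf := &ai.AIConfig{ApiKey: "..."}
	msgs := []ai.ChatMessage{{
		Role: "user", // copied verbatim into openai.ChatCompletionMessage.Role
		Contents: []ai.ChatMessagePart{ // hypothetical type name; the diff only shows .Type/.Content
			{Type: ai.TypeText, Content: "Hello"},
		},
	}}
	// A nil callback selects Chat's non-streaming branch.
	r, err := openai.Chat(conf, msgs, nil, ai.ChatConfig{Model: "gpt-4o-mini-2024-07-18"})
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Result, r.TotalTokens, r.UsedTime)
}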

config.go (88 changed lines)
@@ -1,81 +1,25 @@
 package openai
 
 import (
-	"apigo.cc/ai/llm/llm"
-	"github.com/sashabaranov/go-openai"
+	_ "embed"
+
+	"apigo.cc/ai"
 	"github.com/ssgo/u"
 )
 
-type LLM struct {
-	config llm.Config
-}
-
-var TypeMap = map[string]openai.ChatMessagePartType{
-	llm.TypeText:  openai.ChatMessagePartTypeText,
-	llm.TypeImage: openai.ChatMessagePartTypeImageURL,
-	//llm.TypeVideo: "video_url",
-}
-var RoleMap = map[string]string{
-	llm.RoleSystem:    openai.ChatMessageRoleSystem,
-	llm.RoleUser:      openai.ChatMessageRoleUser,
-	llm.RoleAssistant: openai.ChatMessageRoleAssistant,
-	llm.RoleTool:      openai.ChatMessageRoleTool,
-}
-
-const (
-	ModelGPT_4_32k_0613         = "gpt-4-32k-0613"
-	ModelGPT_4_32k_0314         = "gpt-4-32k-0314"
-	ModelGPT_4_32k              = "gpt-4-32k"
-	ModelGPT_4_0613             = "gpt-4-0613"
-	ModelGPT_4_0314             = "gpt-4-0314"
-	ModelGPT_4o                 = "gpt-4o"
-	ModelGPT_4o_2024_05_13      = "gpt-4o-2024-05-13"
-	ModelGPT_4o_2024_08_06      = "gpt-4o-2024-08-06"
-	ModelGPT_4o_mini            = "gpt-4o-mini"
-	ModelGPT_4o_mini_2024_07_18 = "gpt-4o-mini-2024-07-18"
-	ModelGPT_4_turbo            = "gpt-4-turbo"
-	ModelGPT_4_turbo_2024_04_09 = "gpt-4-turbo-2024-04-09"
-	ModelGPT_4_0125_preview     = "gpt-4-0125-preview"
-	ModelGPT_4_1106_preview     = "gpt-4-1106-preview"
-	ModelGPT_4_turbo_preview    = "gpt-4-turbo-preview"
-	ModelGPT_4_vision_preview   = "gpt-4-vision-preview"
-	ModelGPT_4                  = "gpt-4"
-	ModelGPT_3_5_turbo_0125     = "gpt-3.5-turbo-0125"
-	ModelGPT_3_5_turbo_1106     = "gpt-3.5-turbo-1106"
-	ModelGPT_3_5_turbo_0613     = "gpt-3.5-turbo-0613"
-	ModelGPT_3_5_turbo_0301     = "gpt-3.5-turbo-0301"
-	ModelGPT_3_5_turbo_16k      = "gpt-3.5-turbo-16k"
-	ModelGPT_3_5_turbo_16k_0613 = "gpt-3.5-turbo-16k-0613"
-	ModelGPT_3_5_turbo          = "gpt-3.5-turbo"
-	ModelGPT_3_5_turbo_instruct = "gpt-3.5-turbo-instruct"
-	ModelDavinci_002            = "davinci-002"
-	ModelCurie                  = "curie"
-	ModelCurie_002              = "curie-002"
-	ModelAda_002                = "ada-002"
-	ModelBabbage_002            = "babbage-002"
-	ModelCode_davinci_002       = "code-davinci-002"
-	ModelCode_cushman_001       = "code-cushman-001"
-	ModelCode_davinci_001       = "code-davinci-001"
-	ModelDallE2Std              = "dall-e-2"
-	ModelDallE2HD               = "dall-e-2-hd"
-	ModelDallE3Std              = "dall-e-3"
-	ModelDallE3HD               = "dall-e-3-hd"
-)
-
-func (ag *LLM) Support() llm.Support {
-	return llm.Support{
-		Ask:                    true,
-		AskWithImage:           true,
-		AskWithVideo:           false,
-		AskWithCodeInterpreter: true,
-		AskWithWebSearch:       false,
-		MakeImage:              true,
-		MakeVideo:              false,
-		Models:                 []string{ModelGPT_4_32k_0613, ModelGPT_4_32k_0314, ModelGPT_4_32k, ModelGPT_4_0613, ModelGPT_4_0314, ModelGPT_4o, ModelGPT_4o_2024_05_13, ModelGPT_4o_2024_08_06, ModelGPT_4o_mini, ModelGPT_4o_mini_2024_07_18, ModelGPT_4_turbo, ModelGPT_4_turbo_2024_04_09, ModelGPT_4_0125_preview, ModelGPT_4_1106_preview, ModelGPT_4_turbo_preview, ModelGPT_4_vision_preview, ModelGPT_4, ModelGPT_3_5_turbo_0125, ModelGPT_3_5_turbo_1106, ModelGPT_3_5_turbo_0613, ModelGPT_3_5_turbo_0301, ModelGPT_3_5_turbo_16k, ModelGPT_3_5_turbo_16k_0613, ModelGPT_3_5_turbo, ModelGPT_3_5_turbo_instruct, ModelDavinci_002, ModelCurie, ModelCurie_002, ModelAda_002, ModelBabbage_002, ModelCode_davinci_002, ModelCode_cushman_001, ModelCode_davinci_001, ModelDallE2Std, ModelDallE2HD, ModelDallE3Std, ModelDallE3HD},
-	}
-}
+//go:embed default.yml
+var defaultYml string
 
 func init() {
-	llm.Register("openai", func(config llm.Config) llm.LLM {
-		return &LLM{config: config}
+	defaultConf := ai.AILoadConfig{}
+	u.Convert(u.UnYamlMap(defaultYml), &defaultConf)
+	ai.Register("openai", &ai.Agent{
+		ChatConfigs:      defaultConf.Chat,
+		EmbeddingConfigs: defaultConf.Embedding,
+		ImageConfigs:     defaultConf.Image,
+		VideoConfigs:     defaultConf.Video,
+		Chat:             Chat,
+		Embedding:        Embedding,
+		MakeImage:        MakeImage,
 	})
 }
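
The new init() wires everything through an embedded YAML file instead of hard-coded model constants. Below is a standalone sketch of that embed-then-convert pattern using the same ssgo/u helpers the commit uses; the inline struct is illustrative, while the real target type is ai.AILoadConfig, whose exact shape this diff does not show.

package main

import (
	_ "embed" // enables the //go:embed directive
	"fmt"

	"github.com/ssgo/u"
)

//go:embed default.yml
var defaultYml string

func main() {
	// Parse the YAML into a generic map, then copy it onto a typed struct,
	// mirroring what init() does with ai.AILoadConfig.
	conf := struct {
		Chat map[string]struct{ Model string }
	}{}
	u.Convert(u.UnYamlMap(defaultYml), &conf)
	fmt.Println(conf.Chat["fastAsk"].Model) // gpt-4o-mini-2024-07-18
}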

default.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
+chat:
+  fastAsk:
+    model: gpt-4o-mini-2024-07-18
+  longAsk:
+    model: gpt-4-32k-0613
+  turboAsk:
+    model: gpt-4-turbo
+  visionAsk:
+    model: gpt-4-vision-preview
+  bestAsk:
+    model: gpt-4o-2024-08-06
+  codeInterpreter:
+    model: gpt-4o-2024-08-06
+    tools:
+      codeInterpreter:
+embedding:
+  embedding:
+    model: text-embedding-3-small
+  embeddingLarge:
+    model: text-embedding-3-large
+image:
+  makeImage:
+    model: dall-e-3
+    quality: standard
+    width: 1024
+    height: 1024
+  makeImageHD:
+    model: dall-e-3-hd
+    quality: hd
+    width: 1024
+    height: 1024
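
A reading note on the presets: each name under chat, embedding, and image seems to surface as a script-callable method (ai_test.js calls ai.openai.fastAsk, matching chat.fastAsk here). Also worth flagging: dall-e-3-hd is not an official OpenAI model id. The old gc.go stripped the -hd suffix and mapped it to HD quality, while the new MakeImage below passes conf.Model through unchanged, so the makeImageHD preset may depend on the endpoint tolerating that id.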

gc.go (77 changed lines)
@@ -1,55 +1,36 @@
 package openai
 
 import (
-	"apigo.cc/ai/llm/llm"
 	"context"
-	"github.com/sashabaranov/go-openai"
+	"fmt"
 	"strings"
 	"time"
+
+	"apigo.cc/ai"
+	"github.com/sashabaranov/go-openai"
 )
 
-// func (lm *LLM) FastMakeImage(prompt, size, refImage string) ([]string, llm.Usage, error) {
-// 	return lm.MakeImage(ModelDallE3Std, prompt, size, refImage)
-// }
-//
-// func (lm *LLM) BestMakeImage(prompt, size, refImage string) ([]string, llm.Usage, error) {
-// 	return lm.MakeImage(ModelDallE3HD, prompt, size, refImage)
-// }
-//
-// func (lm *LLM) MakeImage(model, prompt, size, refImage string) ([]string, llm.Usage, error) {
-func (lm *LLM) FastMakeImage(prompt string, config llm.GCConfig) ([]string, llm.Usage, error) {
-	config.Model = ModelDallE3Std
-	return lm.MakeImage(prompt, config)
-}
-
-func (lm *LLM) BestMakeImage(prompt string, config llm.GCConfig) ([]string, llm.Usage, error) {
-	config.Model = ModelDallE3HD
-	return lm.MakeImage(prompt, config)
-}
-
-func (lm *LLM) MakeImage(prompt string, config llm.GCConfig) ([]string, llm.Usage, error) {
-	openaiConf := openai.DefaultConfig(lm.config.ApiKey)
-	if lm.config.Endpoint != "" {
-		openaiConf.BaseURL = lm.config.Endpoint
-	}
-	config.SetDefault(&lm.config.GCConfig)
-	c := openai.NewClientWithConfig(openaiConf)
-	style := openai.CreateImageStyleVivid
-	if (!strings.Contains(prompt, "vivid") || !strings.Contains(prompt, "生动的")) && (strings.Contains(prompt, "natural") || strings.Contains(prompt, "自然的")) {
+func MakeImage(aiConf *ai.AIConfig, conf ai.ImageConfig) (ai.ImageResult, error) {
+	c := getClient(aiConf)
+
+	style := conf.Style
+	if style == "" && (strings.Contains(conf.Prompt, "natural") || strings.Contains(conf.Prompt, "自然")) {
 		style = openai.CreateImageStyleNatural
 	}
-	quality := openai.CreateImageQualityStandard
-	model := config.GetModel()
-	if strings.HasSuffix(model, "-hd") {
-		quality = openai.CreateImageQualityHD
-		model = model[0 : len(model)-3]
+	if style == "" {
+		style = openai.CreateImageStyleVivid
+	}
+	quality := conf.Quality
+	if quality == "" {
+		quality = openai.CreateImageQualityStandard
 	}
 
 	t1 := time.Now().UnixMilli()
 	r, err := c.CreateImage(context.Background(), openai.ImageRequest{
-		Prompt:         prompt,
-		Model:          model,
+		Prompt:         conf.SystemPrompt + conf.Prompt,
+		Model:          conf.Model,
 		Quality:        quality,
-		Size:           config.GetSize(),
+		Size:           fmt.Sprintf("%dx%d", conf.Width, conf.Height),
 		Style:          style,
 		ResponseFormat: openai.CreateImageResponseFormatURL,
 	})
@@ -59,25 +40,11 @@ func (lm *LLM) MakeImage(prompt string, config llm.GCConfig) ([]string, llm.Usag
 	for _, item := range r.Data {
 		results = append(results, item.URL)
 	}
-	return results, llm.Usage{
-		AskTokens:    0,
-		AnswerTokens: 0,
-		TotalTokens:  0,
-		UsedTime:     t2,
+	return ai.ImageResult{
+		Results:  results,
+		UsedTime: t2,
 	}, nil
 	} else {
-		return nil, llm.Usage{}, err
+		return ai.ImageResult{}, err
 	}
 }
-
-func (lm *LLM) FastMakeVideo(prompt string, config llm.GCConfig) ([]string, []string, llm.Usage, error) {
-	return lm.MakeVideo(prompt, config)
-}
-
-func (lm *LLM) BestMakeVideo(prompt string, config llm.GCConfig) ([]string, []string, llm.Usage, error) {
-	return lm.MakeVideo(prompt, config)
-}
-
-func (lm *LLM) MakeVideo(prompt string, config llm.GCConfig) ([]string, []string, llm.Usage, error) {
-	return nil, nil, llm.Usage{}, nil
-}
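
A hypothetical direct caller for the new MakeImage, mirroring the makeImage preset from default.yml; it is not part of the commit. The ai.ImageConfig field names come only from the usages visible above (Prompt, SystemPrompt, Model, Quality, Width, Height, Style); everything else, including the prompt text, is assumed.

package main

import (
	"fmt"

	"apigo.cc/ai"
	"apigo.cc/ai/openai"
)

func main() {
	r, err := openai.MakeImage(&ai.AIConfig{ApiKey: "..."}, ai.ImageConfig{
		Prompt:  "a lighthouse at dawn", // example prompt, not from the commit
		Model:   "dall-e-3",
		Quality: "standard", // mapped onto openai.CreateImageQualityStandard
		Width:   1024,       // rendered into the Size string "1024x1024"
		Height:  1024,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Results, r.UsedTime) // image URLs and elapsed milliseconds
}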

go.mod (15 changed lines)
@@ -1,16 +1,25 @@
 module apigo.cc/ai/openai
 
-go 1.22
+go 1.18
 
 require (
-	apigo.cc/ai/llm v0.0.4
-	github.com/sashabaranov/go-openai v1.29.1
+	apigo.cc/ai v0.0.1
+	apigo.cc/gojs v0.0.4
+	apigo.cc/gojs/console v0.0.1
+	github.com/sashabaranov/go-openai v1.32.5
 	github.com/ssgo/log v1.7.7
 	github.com/ssgo/u v1.7.9
 )
 
 require (
+	github.com/dlclark/regexp2 v1.11.4 // indirect
+	github.com/fsnotify/fsnotify v1.7.0 // indirect
+	github.com/go-sourcemap/sourcemap v2.1.4+incompatible // indirect
+	github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect
 	github.com/ssgo/config v1.7.8 // indirect
 	github.com/ssgo/standard v1.7.7 // indirect
 	github.com/ssgo/tool v0.4.27 // indirect
+	golang.org/x/sys v0.26.0 // indirect
+	golang.org/x/text v0.19.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
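
Dependency notes: the module now depends on apigo.cc/ai and the gojs runtime instead of apigo.cc/ai/llm, and go-openai moves from v1.29.1 to v1.32.5. The added indirect entries (dlclark/regexp2, go-sourcemap, google/pprof) look like dependencies of the goja JavaScript engine underneath gojs, and the go directive drops from 1.22 to 1.18, apparently to lower the minimum supported toolchain.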