commit 70ded2fb95 (Star, 2024-10-02 14:09:54 +08:00)
20 changed files with 1910 additions and 0 deletions

.gitignore vendored Normal file

@@ -0,0 +1,6 @@
.*
!.gitignore
go.sum
env.yml
node_modules
package.json

LICENSE Normal file

@@ -0,0 +1,9 @@
MIT License
Copyright (c) 2024 apigo
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

README.md Normal file

@@ -0,0 +1,136 @@
# Low-Code Tool for Large AI Models
## Command-Line Tool
### Install
```shell
go install apigo.cc/ai/llm/llm-cli@latest
```
### Usage
```shell
llm-cli -h | help             show usage
llm-cli -e | export           export the ai.ts file for development
llm-cli test.js               run test.js (if no file is given, ai.js is run)
llm-cli -w | watch test.js    run test.js, reloading when .js files change
```
### Sample
#### test.js
```javascript
import {zhipu} from 'llm'
import console from 'console'
function main(...args) {
    let r = zhipu.fastAsk(args.length > 0 ? args[0] : '你好', r => {
        console.print(r)
    })
    console.println()
    return r
}
```
#### run sample
```shell
llm-cli test.js "你好"
```
### Configure
#### env.yml
```yaml
llm:
  openai:
    default:
      apiKey: ...
    azure:
      apiKey: ...
      endpoint: ...
  zhipu:
    default:
      apiKey: ...
```
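Each named entry is exposed to scripts under its own name: a `default` entry takes its provider's name, while any other entry keeps its own key, so the configuration above yields `openai`, `azure`, and `zhipu` objects in the `llm` module.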
#### encrypt apiKey
Install sskey and encrypt your key:
```shell
go install github.com/ssgo/tool/sskey@latest
sskey -e 'your apiKey'
```
Copy the "url base64" form of the encrypted apiKey into llm.yml or env.yml.
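For reference, the library decrypts these values with ssgo/u's AES helper (see config.go, which also exposes `SetSSKey` to replace the built-in key). A minimal sketch of the decryption side, using the default key and iv embedded in config.go; the placeholder argument stands for your own sskey output:

```go
package main

import (
	"fmt"

	"github.com/ssgo/u"
)

func main() {
	// Default key/iv from config.go; replace them with your own values
	// if you use a custom sskey via SetSSKey.
	aes := u.NewAes([]byte("?GQ$0K0GgLdO=f+~L68PLm$uhKr4'=tV"), []byte("VFs7@sK61cj^f?HZ"))
	// The argument is the "url base64" output of `sskey -e`.
	fmt.Println(aes.DecryptUrlBase64ToString("<encrypted apiKey>"))
}
```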
## Integrating [llm](https://apigo.cc/ai/llm) and [low-code](https://apigo.cc/apigo/gojs) into an Application
### Install
```shell
go get -u apigo.cc/apigo/gojs
go get -u apigo.cc/ai/llm
```
### Usage
```go
package main

import (
	"fmt"

	_ "apigo.cc/ai/llm"
	"apigo.cc/apigo/gojs"
	_ "apigo.cc/apigo/gojs/modules"
)

func main() {
	result, err := gojs.Run(`return llm.zhipu.fastAsk('你好', console.print)`, "")
	if err != nil {
		fmt.Println(err.Error())
	} else if result != nil {
		fmt.Println(result)
	}
}
```
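To run a script from disk instead of an inline string, the same module setup works with `gojs.RunFile` (as llm-cli does); a minimal sketch, assuming a test.js next to the binary:

```go
package main

import (
	"fmt"

	_ "apigo.cc/ai/llm"
	"apigo.cc/apigo/gojs"
	_ "apigo.cc/apigo/gojs/modules"
)

func main() {
	// Extra arguments are passed to the script's main(...args).
	result, err := gojs.RunFile("test.js", "你好")
	if err != nil {
		fmt.Println(err.Error())
	} else if result != nil {
		fmt.Println(result)
	}
}
```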
## Calling the Go API
### Install
```shell
go get -u apigo.cc/ai/llm
```
### Usage
```go
package main

import (
	"fmt"

	_ "apigo.cc/ai/llm" // loads env.yml and creates the configured clients
	"apigo.cc/ai/llm/llm"
)

func main() {
	zhipu := llm.Get("zhipu")
	r, usage, err := zhipu.FastAsk(llm.Messages().User().Text("你是什么模型").Make(), func(text string) {
		fmt.Print(text)
	})
	if err != nil {
		fmt.Println(err)
	} else {
		fmt.Println()
		fmt.Println("result:", r)
		fmt.Println("usage:", usage)
	}
}
```
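Embeddings follow the same pattern; a minimal sketch that compares two texts with the package's cosine `Similarity` helper (assumes a configured `zhipu` entry in env.yml):

```go
package main

import (
	"fmt"

	_ "apigo.cc/ai/llm"
	"apigo.cc/ai/llm/llm"
)

func main() {
	zhipu := llm.Get("zhipu")
	// FastEmbedding returns the vector as little-endian float32 bytes.
	a, _, err := zhipu.FastEmbedding("你好")
	if err != nil {
		fmt.Println(err)
		return
	}
	b, _, err := zhipu.FastEmbedding("hello")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("similarity:", llm.Similarity(a, b))
}
```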

config.go Normal file

@@ -0,0 +1,80 @@
package llm
import (
"apigo.cc/ai/llm/llm"
"apigo.cc/apigo/gojs"
"apigo.cc/apigo/gojs/dop251/goja"
"bytes"
_ "embed"
"github.com/ssgo/config"
"github.com/ssgo/u"
"text/template"
)
//go:embed llm.ts
var llmTS string
//go:embed llm.md
var llmMD string
var confAes = u.NewAes([]byte("?GQ$0K0GgLdO=f+~L68PLm$uhKr4'=tV"), []byte("VFs7@sK61cj^f?HZ"))
var keysIsSet = false
func SetSSKey(key, iv []byte) {
if !keysIsSet {
confAes = u.NewAes(key, iv)
keysIsSet = true
}
}
func init() {
list := map[string]*map[string]*struct {
Endpoint string
ApiKey string
ChatConfig llm.ChatConfig
GCConfig llm.GCConfig
Debug bool
}{}
jsObj := gojs.Map{}
llmList := make([]string, 0)
_ = config.LoadConfig("llm", &list)
for llmName, llmConfigs := range list {
for confName, llmConf := range *llmConfigs {
llmConf.ApiKey = confAes.DecryptUrlBase64ToString(llmConf.ApiKey)
if confName == "default" {
confName = llmName
}
llmList = append(llmList, confName)
llmObj := llm.Create(confName, llmName, llm.Config{
Endpoint: llmConf.Endpoint,
ApiKey: llmConf.ApiKey,
ChatConfig: llmConf.ChatConfig,
GCConfig: llmConf.GCConfig,
Debug: llmConf.Debug,
})
jsObj[confName] = MakeLLM(llmObj)
}
}
jsObj["similarity"] = func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(2)
return vm.ToValue(llm.Similarity(args.Bytes(0), args.Bytes(1)))
}
var tpl *template.Template
var err error
llmTSCode := ""
if tpl, err = template.New("").Parse(llmTS); err == nil {
buf := bytes.NewBuffer(make([]byte, 0))
if err = tpl.Execute(buf, llmList); err == nil {
llmTSCode = string(buf.Bytes())
}
}
gojs.Register("llm", gojs.Module{
Object: jsObj,
TsCode: llmTSCode,
Desc: "llm plugin for gojs(http://apigo.cc/apigo/gojs)",
Example: llmMD,
})
}

go.mod Normal file

@@ -0,0 +1,33 @@
module apigo.cc/ai/llm
go 1.18
require (
apigo.cc/apigo/gojs v0.1.1
github.com/go-resty/resty/v2 v2.15.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/sashabaranov/go-openai v1.30.3
github.com/ssgo/config v1.7.7
github.com/ssgo/log v1.7.7
github.com/ssgo/u v1.7.7
github.com/yankeguo/zhipu v0.1.2
)
require (
filippo.io/edwards25519 v1.1.0 // indirect
github.com/dlclark/regexp2 v1.11.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-sourcemap/sourcemap v2.1.4+incompatible // indirect
github.com/go-sql-driver/mysql v1.8.1 // indirect
github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/ssgo/dao v0.1.5 // indirect
github.com/ssgo/db v1.7.9 // indirect
github.com/ssgo/httpclient v1.7.7 // indirect
github.com/ssgo/standard v1.7.7 // indirect
github.com/ssgo/tool v0.4.27 // indirect
golang.org/x/net v0.29.0 // indirect
golang.org/x/sys v0.25.0 // indirect
golang.org/x/text v0.18.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

llm-cli/main.go Normal file

@@ -0,0 +1,71 @@
package main
import (
_ "apigo.cc/ai/llm"
"apigo.cc/apigo/gojs"
_ "apigo.cc/apigo/gojs/modules"
"fmt"
"github.com/ssgo/u"
"os"
)
func main() {
args := os.Args[1:]
if len(args) > 0 && (args[0] == "-e" || args[0] == "export") {
imports := gojs.ExportForDev()
fmt.Println(`exported to node_modules
test.js:
`)
fmt.Println(u.Cyan(imports + `
function main(...args) {
let r = llm.zhipu.fastAsk(args.length>0?args[0]:'你好', console.print)
console.println()
return r
}
run:
llm-cli test.js 你是谁
`))
return
}
isWatch := false
if len(args) > 0 && (args[0] == "-w" || args[0] == "watch") {
isWatch = true
args = args[1:]
}
jsFile := ""
if len(args) > 0 {
jsFile = args[0]
args = args[1:]
}
if jsFile != "" && u.FileExists(jsFile) {
if isWatch {
if w, err := gojs.WatchRun(jsFile, nil, nil, u.ToInterfaceArray(args)...); err == nil {
w.WaitForKill()
} else {
fmt.Println(u.BRed(err.Error()))
}
} else {
if _, err := gojs.RunFile(jsFile, u.ToInterfaceArray(args)...); err != nil {
fmt.Println(u.BRed(err.Error()))
}
}
return
}
fmt.Println(`Usage:
llm-cli -h | help             show usage
llm-cli -e | export           export the ai.ts file for development
llm-cli test.js               run test.js (if no file is given, ai.js is run)
llm-cli -w | watch test.js    run test.js, reloading when .js files change
`)
}

llm.go Normal file

@@ -0,0 +1,344 @@
package llm
import (
"apigo.cc/ai/llm/llm"
_ "apigo.cc/ai/llm/openai"
_ "apigo.cc/ai/llm/zhipu"
"apigo.cc/apigo/gojs"
"apigo.cc/apigo/gojs/dop251/goja"
"github.com/ssgo/u"
"reflect"
"strings"
)
func MakeLLM(lm llm.LLM) map[string]any {
return map[string]any{
"ask": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
conf, cb := getAskArgs(args.This, vm, args.Arguments)
result, usage, err := lm.Ask(makeChatMessages(args.Arguments), conf, cb)
return makeChatResult(vm, result, &usage, err)
},
"fastAsk": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
_, cb := getAskArgs(args.This, vm, args.Arguments)
result, usage, err := lm.FastAsk(makeChatMessages(args.Arguments), cb)
return makeChatResult(vm, result, &usage, err)
},
"longAsk": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
_, cb := getAskArgs(args.This, vm, args.Arguments)
result, usage, err := lm.LongAsk(makeChatMessages(args.Arguments), cb)
return makeChatResult(vm, result, &usage, err)
},
"batterAsk": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
_, cb := getAskArgs(args.This, vm, args.Arguments)
result, usage, err := lm.BatterAsk(makeChatMessages(args.Arguments), cb)
return makeChatResult(vm, result, &usage, err)
},
"bestAsk": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
_, cb := getAskArgs(args.This, vm, args.Arguments)
result, usage, err := lm.BestAsk(makeChatMessages(args.Arguments), cb)
return makeChatResult(vm, result, &usage, err)
},
"multiAsk": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
_, cb := getAskArgs(args.This, vm, args.Arguments)
result, usage, err := lm.MultiAsk(makeChatMessages(args.Arguments), cb)
return makeChatResult(vm, result, &usage, err)
},
"bestMultiAsk": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
_, cb := getAskArgs(args.This, vm, args.Arguments)
result, usage, err := lm.BestMultiAsk(makeChatMessages(args.Arguments), cb)
return makeChatResult(vm, result, &usage, err)
},
"codeInterpreterAsk": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
_, cb := getAskArgs(args.This, vm, args.Arguments)
result, usage, err := lm.CodeInterpreterAsk(makeChatMessages(args.Arguments), cb)
return makeChatResult(vm, result, &usage, err)
},
"webSearchAsk": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
_, cb := getAskArgs(args.This, vm, args.Arguments)
result, usage, err := lm.WebSearchAsk(makeChatMessages(args.Arguments), cb)
return makeChatResult(vm, result, &usage, err)
},
"makeImage": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
prompt, conf := getGCArgs(args.Arguments)
results, usage, err := lm.MakeImage(prompt, conf)
return makeGCResult(vm, results, nil, &usage, err)
},
"fastMakeImage": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
prompt, conf := getGCArgs(args.Arguments)
results, usage, err := lm.FastMakeImage(prompt, conf)
return makeGCResult(vm, results, nil, &usage, err)
},
"bestMakeImage": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
prompt, conf := getGCArgs(args.Arguments)
results, usage, err := lm.BestMakeImage(prompt, conf)
return makeGCResult(vm, results, nil, &usage, err)
},
"makeVideo": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
prompt, conf := getGCArgs(args.Arguments)
results, previews, usage, err := lm.MakeVideo(prompt, conf)
return makeGCResult(vm, results, previews, &usage, err)
},
"fastMakeVideo": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
prompt, conf := getGCArgs(args.Arguments)
results, previews, usage, err := lm.FastMakeVideo(prompt, conf)
return makeGCResult(vm, results, previews, &usage, err)
},
"bestMakeVideo": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
prompt, conf := getGCArgs(args.Arguments)
results, previews, usage, err := lm.BestMakeVideo(prompt, conf)
return makeGCResult(vm, results, previews, &usage, err)
},
"embedding": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(2)
results, usage, err := lm.Embedding(args.Str(0), args.Str(1))
return makeEmbeddingResult(vm, results, &usage, err)
},
"fastEmbedding": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
results, usage, err := lm.FastEmbedding(args.Str(0))
return makeEmbeddingResult(vm, results, &usage, err)
},
"bestEmbedding": func(argsIn goja.FunctionCall, vm *goja.Runtime) goja.Value {
args := gojs.MakeArgs(&argsIn, vm).Check(1)
results, usage, err := lm.BestEmbedding(args.Str(0))
return makeEmbeddingResult(vm, results, &usage, err)
},
"support": lm.Support(),
}
}
func getErrorStr(err error) string {
if err != nil {
return err.Error()
}
return ""
}
func makeChatResult(vm *goja.Runtime, result string, usage *llm.Usage, err error) goja.Value {
if err != nil {
panic(vm.NewGoError(err))
}
return vm.ToValue(map[string]any{
"result": result,
"askTokens": usage.AskTokens,
"answerTokens": usage.AnswerTokens,
"totalTokens": usage.TotalTokens,
"usedTime": usage.UsedTime,
})
}
func makeEmbeddingResult(vm *goja.Runtime, result []byte, usage *llm.Usage, err error) goja.Value {
if err != nil {
panic(vm.NewGoError(err))
}
return vm.ToValue(map[string]any{
"result": result,
"askTokens": usage.AskTokens,
"answerTokens": usage.AnswerTokens,
"totalTokens": usage.TotalTokens,
"usedTime": usage.UsedTime,
})
}
func makeGCResult(vm *goja.Runtime, results []string, previews []string, usage *llm.Usage, err error) goja.Value {
if err != nil {
panic(vm.NewGoError(err))
}
result := ""
preview := ""
if len(results) > 0 {
result = results[0]
} else {
results = make([]string, 0)
}
if len(previews) > 0 {
preview = previews[0]
} else {
previews = make([]string, 0)
}
return vm.ToValue(map[string]any{
"result": result,
"preview": preview,
"results": results,
"previews": previews,
"usedTime": usage.UsedTime,
})
}
func getGCArgs(args []goja.Value) (string, llm.GCConfig) {
prompt := ""
var config llm.GCConfig
if len(args) > 0 {
prompt = u.String(args[0].Export())
if len(args) > 1 {
u.Convert(args[1].Export(), &config)
}
}
return prompt, config
}
func getAskArgs(thisArg goja.Value, vm *goja.Runtime, args []goja.Value) (llm.ChatConfig, func(string)) {
var chatConfig llm.ChatConfig
var callback func(answer string)
if len(args) > 0 {
for i := 1; i < len(args); i++ {
if cb, ok := goja.AssertFunction(args[i]); ok {
callback = func(answer string) {
_, _ = cb(thisArg, vm.ToValue(answer))
}
} else if args[i].ExportType() != nil {
switch args[i].ExportType().Kind() {
case reflect.Map, reflect.Struct:
u.Convert(args[i].Export(), &chatConfig)
default:
chatConfig.Model = u.String(args[i].Export())
}
}
}
}
return chatConfig, callback
}
func makeChatMessages(args []goja.Value) []llm.ChatMessage {
out := make([]llm.ChatMessage, 0)
if len(args) > 0 {
v := args[0].Export()
vv := reflect.ValueOf(v)
t := args[0].ExportType()
if t != nil {
lastRoleIsUser := false
switch t.Kind() {
// Slice: handled according to element type
//   string:
//     with media (URL or data URI): a single multimodal message
//     without media: multiple text messages
//   slice: multiple messages (a role is generated when the first element is not a role)
//   map: multiple messages (a role is generated when missing; supports content or contents)
//   struct: converted to llm.ChatMessage
// Map: a single message (supports content or contents)
// Struct: converted to llm.ChatMessage
// String: a single text message
case reflect.Slice:
hasSub := false
hasMulti := false
for i := 0; i < vv.Len(); i++ {
vv2 := u.FinalValue(vv.Index(i))
if vv2.Kind() == reflect.Slice || vv2.Kind() == reflect.Map || vv2.Kind() == reflect.Struct {
hasSub = true
break
}
if vv2.Kind() == reflect.String {
str := vv2.String()
if strings.HasPrefix(str, "data:") || strings.HasPrefix(str, "https://") || strings.HasPrefix(str, "http://") {
hasMulti = true
}
}
}
if hasSub || !hasMulti {
// nested values, or a plain-text array: one message per element
var defaultRole string
for i := 0; i < vv.Len(); i++ {
lastRoleIsUser = !lastRoleIsUser
if lastRoleIsUser {
defaultRole = llm.RoleUser
} else {
defaultRole = llm.RoleAssistant
}
vv2 := u.FinalValue(vv.Index(i))
switch vv2.Kind() {
case reflect.Slice:
out = append(out, makeChatMessageFromSlice(vv2, defaultRole))
case reflect.Map:
out = append(out, makeChatMessageFromMap(vv2, defaultRole))
case reflect.Struct:
item := llm.ChatMessage{}
u.Convert(vv2.Interface(), &item)
out = append(out, item)
default:
out = append(out, llm.ChatMessage{Role: llm.RoleUser, Contents: []llm.ChatMessageContent{makeChatMessageContent(u.String(vv2.Interface()))}})
}
lastRoleIsUser = out[len(out)-1].Role != llm.RoleUser
}
} else {
// a single multimodal message
out = append(out, makeChatMessageFromSlice(vv, llm.RoleUser))
}
case reflect.Map:
out = append(out, makeChatMessageFromMap(vv, llm.RoleUser))
case reflect.Struct:
item := llm.ChatMessage{}
u.Convert(v, &item)
out = append(out, item)
default:
out = append(out, llm.ChatMessage{Role: llm.RoleUser, Contents: []llm.ChatMessageContent{makeChatMessageContent(u.String(v))}})
}
}
}
return out
}
func makeChatMessageFromSlice(vv reflect.Value, defaultRole string) llm.ChatMessage {
role := u.String(vv.Index(0).Interface())
j := 0
if role == llm.RoleUser || role == llm.RoleAssistant || role == llm.RoleSystem || role == llm.RoleTool {
j = 1
} else {
role = defaultRole
}
contents := make([]llm.ChatMessageContent, 0)
for ; j < vv.Len(); j++ {
contents = append(contents, makeChatMessageContent(u.String(vv.Index(j).Interface())))
}
return llm.ChatMessage{Role: role, Contents: contents}
}
func makeChatMessageFromMap(vv reflect.Value, defaultRole string) llm.ChatMessage {
role := u.String(vv.MapIndex(reflect.ValueOf("role")).Interface())
if role == "" {
role = defaultRole
}
contents := make([]llm.ChatMessageContent, 0)
content := u.String(vv.MapIndex(reflect.ValueOf("content")).Interface())
if content != "" {
contents = append(contents, makeChatMessageContent(content))
} else {
contentsV := vv.MapIndex(reflect.ValueOf("contents"))
if contentsV.IsValid() && contentsV.Kind() == reflect.Slice {
for i := 0; i < contentsV.Len(); i++ {
contents = append(contents, makeChatMessageContent(u.String(contentsV.Index(i).Interface())))
}
}
}
return llm.ChatMessage{Role: role, Contents: contents}
}
func makeChatMessageContent(content string) llm.ChatMessageContent {
if strings.HasPrefix(content, "data:image/") || ((strings.HasPrefix(content, "https://") || strings.HasPrefix(content, "http://")) && (strings.HasSuffix(content, ".png") || strings.HasSuffix(content, ".jpg") || strings.HasSuffix(content, ".jpeg") || strings.HasSuffix(content, ".gif") || strings.HasSuffix(content, ".svg"))) {
return llm.ChatMessageContent{Type: llm.TypeImage, Content: content}
} else if strings.HasPrefix(content, "data:video/") || ((strings.HasPrefix(content, "https://") || strings.HasPrefix(content, "http://")) && (strings.HasSuffix(content, ".mp4") || strings.HasSuffix(content, ".mov") || strings.HasSuffix(content, ".m4v") || strings.HasSuffix(content, ".avi") || strings.HasSuffix(content, ".wmv"))) {
return llm.ChatMessageContent{Type: llm.TypeVideo, Content: content}
}
return llm.ChatMessageContent{Type: llm.TypeText, Content: content}
}

llm.md Normal file

@@ -0,0 +1,39 @@
### Configure
#### env.yml
```yaml
llm:
  openai:
    default:
      apiKey: ...
    azure:
      apiKey: ...
      endpoint: ...
  zhipu:
    default:
      apiKey: ...
```
#### encrypt apiKey
```shell
go install github.com/ssgo/tool/sskey@latest
sskey -e 'your apiKey' | grep 'url base64'
```
JavaScript example:
```javascript
import {zhipu, azure} from 'llm'
import console from 'console'
function main(...args) {
    let r = zhipu.fastAsk(args.length > 0 ? args[0] : '你好', r => {
        console.print(r)
    })
    console.println()
    return r
}
```

llm.ts Normal file

@@ -0,0 +1,86 @@
// for development only
{{range .}}
let {{.}}: LLM
{{- end}}
export default {
similarity,
{{- range .}}
{{.}},
{{- end}}
}
function similarity(a: any, b: any): number { return 0 }
interface ChatConfig {
model: string
ratio: number
maxTokens: number
temperature: number
topP: number
tools: Object
}
interface ChatResult {
result: string
askTokens: number
answerTokens: number
totalTokens: number
usedTime: number
}
interface GCConfig {
model: string
size: string
ref: string
}
interface GCResult {
result: string
preview: string
results: Array<string>
previews: Array<string>
usedTime: number
}
interface EmbeddingResult {
result: string
askTokens: number
answerTokens: number
totalTokens: number
usedTime: number
}
interface Support {
ask: boolean
askWithImage: boolean
askWithVideo: boolean
askWithCodeInterpreter: boolean
askWithWebSearch: boolean
makeImage: boolean
makeVideo: boolean
models: Array<string>
}
interface LLM {
ask(messages: any, config?: ChatConfig, callback?: (answer: string) => void): ChatResult
fastAsk(messages: any, callback?: (answer: string) => void): ChatResult
longAsk(messages: any, callback?: (answer: string) => void): ChatResult
batterAsk(messages: any, callback?: (answer: string) => void): ChatResult
bestAsk(messages: any, callback?: (answer: string) => void): ChatResult
multiAsk(messages: any, callback?: (answer: string) => void): ChatResult
bestMultiAsk(messages: any, callback?: (answer: string) => void): ChatResult
codeInterpreterAsk(messages: any, callback?: (answer: string) => void): ChatResult
webSearchAsk(messages: any, callback?: (answer: string) => void): ChatResult
makeImage(prompt: string, config?: GCConfig): GCResult
fastMakeImage(prompt: string, config?: GCConfig): GCResult
bestMakeImage(prompt: string, config?: GCConfig): GCResult
makeVideo(prompt: string, config?: GCConfig): GCResult
fastMakeVideo(prompt: string, config?: GCConfig): GCResult
bestMakeVideo(prompt: string, config?: GCConfig): GCResult
embedding(text: string, model: string): EmbeddingResult
fastEmbedding(text: string): EmbeddingResult
bestEmbedding(text: string): EmbeddingResult
support: Support
}

llm/chat.go Normal file

@@ -0,0 +1,187 @@
package llm
import (
"bytes"
"encoding/binary"
"math"
)
type ChatMessage struct {
Role string
Contents []ChatMessageContent
}
type ChatMessageContent struct {
Type string // text, image, audio, video
Content string
}
type ChatConfig struct {
defaultConfig *ChatConfig
Model string
Ratio float64
MaxTokens int
Temperature float64
TopP float64
Tools map[string]any
}
func (chatConfig *ChatConfig) SetDefault(config *ChatConfig) {
chatConfig.defaultConfig = config
}
func (chatConfig *ChatConfig) GetModel() string {
if chatConfig.Model == "" && chatConfig.defaultConfig != nil {
return chatConfig.defaultConfig.Model
}
return chatConfig.Model
}
func (chatConfig *ChatConfig) GetMaxTokens() int {
if chatConfig.MaxTokens == 0 && chatConfig.defaultConfig != nil {
return chatConfig.defaultConfig.MaxTokens
}
return chatConfig.MaxTokens
}
func (chatConfig *ChatConfig) GetTemperature() float64 {
if chatConfig.Temperature == 0 && chatConfig.defaultConfig != nil {
return chatConfig.defaultConfig.Temperature
}
return chatConfig.Temperature
}
func (chatConfig *ChatConfig) GetTopP() float64 {
if chatConfig.TopP == 0 && chatConfig.defaultConfig != nil {
return chatConfig.defaultConfig.TopP
}
return chatConfig.TopP
}
func (chatConfig *ChatConfig) GetTools() map[string]any {
if chatConfig.Tools == nil && chatConfig.defaultConfig != nil {
return chatConfig.defaultConfig.Tools
}
return chatConfig.Tools
}
type Usage struct {
AskTokens int64
AnswerTokens int64
TotalTokens int64
UsedTime int64
}
type MessagesMaker struct {
list []ChatMessage
}
func Messages() *MessagesMaker {
return &MessagesMaker{
list: make([]ChatMessage, 0),
}
}
func (m *MessagesMaker) Make() []ChatMessage {
return m.list
}
func (m *MessagesMaker) User(contents ...ChatMessageContent) *MessagesMaker {
m.list = append(m.list, ChatMessage{
Role: RoleUser,
Contents: contents,
})
return m
}
func (m *MessagesMaker) Assistant(contents ...ChatMessageContent) *MessagesMaker {
m.list = append(m.list, ChatMessage{
Role: RoleAssistant,
Contents: contents,
})
return m
}
func (m *MessagesMaker) System(contents ...ChatMessageContent) *MessagesMaker {
m.list = append(m.list, ChatMessage{
Role: RoleSystem,
Contents: contents,
})
return m
}
func (m *MessagesMaker) Tool(contents ...ChatMessageContent) *MessagesMaker {
m.list = append(m.list, ChatMessage{
Role: RoleTool,
Contents: contents,
})
return m
}
func (m *MessagesMaker) Text(text string) *MessagesMaker {
if len(m.list) > 0 {
lastIndex := len(m.list) - 1
m.list[lastIndex].Contents = append(m.list[lastIndex].Contents, ChatMessageContent{
Type: TypeText,
Content: text,
})
}
return m
}
func (m *MessagesMaker) Image(content string) *MessagesMaker {
if len(m.list) > 0 {
lastIndex := len(m.list) - 1
m.list[lastIndex].Contents = append(m.list[lastIndex].Contents, ChatMessageContent{
Type: TypeImage,
Content: content,
})
}
return m
}
func (m *MessagesMaker) Video(content string) *MessagesMaker {
if len(m.list) > 0 {
lastIndex := len(m.list) - 1
m.list[lastIndex].Contents = append(m.list[lastIndex].Contents, ChatMessageContent{
Type: TypeVideo,
Content: content,
})
}
return m
}
// bin2float64 decodes a buffer of little-endian float32 values into float64s.
func bin2float64(in []byte) []float64 {
buf := bytes.NewBuffer(in)
out := make([]float64, len(in)/4)
for i := 0; i < len(out); i++ {
var f float32
_ = binary.Read(buf, binary.LittleEndian, &f)
out[i] = float64(f)
}
return out
}
// Similarity computes the cosine similarity of two embedding buffers produced by Embedding.
func Similarity(buf1, buf2 []byte) float64 {
a := bin2float64(buf1)
b := bin2float64(buf2)
if len(a) != len(b) {
return 0
}
var dotProduct, magnitudeA, magnitudeB float64
for i := 0; i < len(a); i++ {
dotProduct += a[i] * b[i]
magnitudeA += a[i] * a[i]
magnitudeB += b[i] * b[i]
}
magnitudeA = math.Sqrt(magnitudeA)
magnitudeB = math.Sqrt(magnitudeB)
if magnitudeA == 0 || magnitudeB == 0 {
return 0
}
return dotProduct / (magnitudeA * magnitudeB)
}

llm/gc.go Normal file

@@ -0,0 +1,33 @@
package llm
type GCConfig struct {
defaultConfig *GCConfig
Model string
Size string
Ref string
}
func (gcConfig *GCConfig) SetDefault(config *GCConfig) {
gcConfig.defaultConfig = config
}
func (gcConfig *GCConfig) GetModel() string {
if gcConfig.Model == "" && gcConfig.defaultConfig != nil {
return gcConfig.defaultConfig.Model
}
return gcConfig.Model
}
func (gcConfig *GCConfig) GetSize() string {
if gcConfig.Size == "" && gcConfig.defaultConfig != nil {
return gcConfig.defaultConfig.Size
}
return gcConfig.Size
}
func (gcConfig *GCConfig) GetRef() string {
if gcConfig.Ref == "" && gcConfig.defaultConfig != nil {
return gcConfig.defaultConfig.Ref
}
return gcConfig.Ref
}

llm/llm.go Normal file

@@ -0,0 +1,100 @@
package llm
import "sync"
const (
TypeText = "text"
TypeImage = "image"
TypeVideo = "video"
RoleSystem = "system"
RoleUser = "user"
RoleAssistant = "assistant"
RoleTool = "tool"
ToolCodeInterpreter = "codeInterpreter"
ToolWebSearch = "webSearch"
)
type Support struct {
Ask bool
AskWithImage bool
AskWithVideo bool
AskWithCodeInterpreter bool
AskWithWebSearch bool
MakeImage bool
MakeVideo bool
Models []string
}
type Config struct {
Endpoint string
ApiKey string
ChatConfig ChatConfig
GCConfig GCConfig
Debug bool
}
type LLM interface {
Support() Support
Ask(messages []ChatMessage, config ChatConfig, callback func(answer string)) (string, Usage, error)
FastAsk(messages []ChatMessage, callback func(answer string)) (string, Usage, error)
LongAsk(messages []ChatMessage, callback func(answer string)) (string, Usage, error)
BatterAsk(messages []ChatMessage, callback func(answer string)) (string, Usage, error)
BestAsk(messages []ChatMessage, callback func(answer string)) (string, Usage, error)
MultiAsk(messages []ChatMessage, callback func(answer string)) (string, Usage, error)
BestMultiAsk(messages []ChatMessage, callback func(answer string)) (string, Usage, error)
CodeInterpreterAsk(messages []ChatMessage, callback func(answer string)) (string, Usage, error)
WebSearchAsk(messages []ChatMessage, callback func(answer string)) (string, Usage, error)
MakeImage(prompt string, config GCConfig) ([]string, Usage, error)
FastMakeImage(prompt string, config GCConfig) ([]string, Usage, error)
BestMakeImage(prompt string, config GCConfig) ([]string, Usage, error)
MakeVideo(prompt string, config GCConfig) ([]string, []string, Usage, error)
FastMakeVideo(prompt string, config GCConfig) ([]string, []string, Usage, error)
BestMakeVideo(prompt string, config GCConfig) ([]string, []string, Usage, error)
Embedding(text string, model string) ([]byte, Usage, error)
FastEmbedding(text string) ([]byte, Usage, error)
BestEmbedding(text string) ([]byte, Usage, error)
}
var llmMakers = map[string]func(Config) LLM{}
var llmMakersLock = sync.RWMutex{}
var llms = map[string]LLM{}
var llmsLock = sync.RWMutex{}
func Register(llmId string, maker func(Config) LLM) {
llmMakersLock.Lock()
llmMakers[llmId] = maker
llmMakersLock.Unlock()
}
func Create(name, llmId string, config Config) LLM {
llmMakersLock.RLock()
maker := llmMakers[llmId]
llmMakersLock.RUnlock()
if maker != nil {
llm := maker(config)
llmsLock.Lock()
llms[name] = llm
llmsLock.Unlock()
return llm
}
return nil
}
func Get(name string) LLM {
llmsLock.RLock()
llm := llms[name]
llmsLock.RUnlock()
return llm
}
func List() map[string]LLM {
list := map[string]LLM{}
llmsLock.RLock()
for name, llm := range llms {
list[name] = llm
}
llmsLock.RUnlock()
return list
}

llm_test.go Normal file

@@ -0,0 +1,39 @@
package llm
import (
_ "apigo.cc/ai/llm/zhipu"
"apigo.cc/apigo/gojs"
_ "apigo.cc/apigo/gojs/modules"
"fmt"
"github.com/ssgo/u"
"testing"
)
//func TestZhipu(t *testing.T) {
// zp := llm.Create("zp", "zhipu", llm.Config{
// ApiKey: "<your apiKey>",
// })
// r, usage, err := zp.FastAsk(llm.Messages().User().Text("你好").Make(), nil) //func(r string) {
// // fmt.Print(r)
// //})
//
// fmt.Println(11, r)
// fmt.Println(22, usage)
// fmt.Println(33, err)
//}
func TestExport(t *testing.T) {
gojs.ExportForDev()
}
func TestLLM(t *testing.T) {
r, err := gojs.RunFile("test.js")
if err != nil {
t.Fatal(err)
}
if r == nil {
t.Fatal("no answer")
}
fmt.Println()
fmt.Println(u.BCyan(u.JsonP(r)))
}

openai/chat.go Normal file

@@ -0,0 +1,230 @@
package openai
import (
"apigo.cc/ai/llm/llm"
"bytes"
"context"
"encoding/binary"
"fmt"
"github.com/sashabaranov/go-openai"
"github.com/ssgo/log"
"github.com/ssgo/u"
"strings"
"time"
)
func (lm *LLM) FastAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
return lm.Ask(messages, llm.ChatConfig{
Model: ModelGPT_4o_mini_2024_07_18,
}, callback)
}
func (lm *LLM) LongAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
return lm.Ask(messages, llm.ChatConfig{
Model: ModelGPT_4_32k_0613,
}, callback)
}
func (lm *LLM) BatterAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
return lm.Ask(messages, llm.ChatConfig{
Model: ModelGPT_4_turbo,
}, callback)
}
func (lm *LLM) BestAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
return lm.Ask(messages, llm.ChatConfig{
Model: ModelGPT_4o_2024_08_06,
}, callback)
}
func (lm *LLM) MultiAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
return lm.Ask(messages, llm.ChatConfig{
Model: ModelGPT_4o_mini_2024_07_18,
}, callback)
}
func (lm *LLM) BestMultiAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
return lm.Ask(messages, llm.ChatConfig{
Model: ModelGPT_4o_2024_08_06,
}, callback)
}
func (lm *LLM) CodeInterpreterAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
return lm.Ask(messages, llm.ChatConfig{
Model: ModelGPT_4o,
Tools: map[string]any{llm.ToolCodeInterpreter: nil},
}, callback)
}
func (lm *LLM) WebSearchAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
return lm.Ask(messages, llm.ChatConfig{
Model: ModelGPT_4o_mini_2024_07_18,
Tools: map[string]any{llm.ToolWebSearch: nil},
}, callback)
}
func (lm *LLM) Ask(messages []llm.ChatMessage, config llm.ChatConfig, callback func(answer string)) (string, llm.Usage, error) {
openaiConf := openai.DefaultConfig(lm.config.ApiKey)
if lm.config.Endpoint != "" {
openaiConf.BaseURL = lm.config.Endpoint
}
config.SetDefault(&lm.config.ChatConfig)
agentMessages := make([]openai.ChatCompletionMessage, len(messages))
for i, msg := range messages {
var contents []openai.ChatMessagePart
if msg.Contents != nil {
contents = make([]openai.ChatMessagePart, len(msg.Contents))
for j, inPart := range msg.Contents {
part := openai.ChatMessagePart{}
part.Type = TypeMap[inPart.Type]
switch inPart.Type {
case llm.TypeText:
part.Text = inPart.Content
case llm.TypeImage:
part.ImageURL = &openai.ChatMessageImageURL{
URL: inPart.Content,
Detail: openai.ImageURLDetailAuto,
}
}
contents[j] = part
}
}
if len(contents) == 1 && contents[0].Type == llm.TypeText {
agentMessages[i] = openai.ChatCompletionMessage{
Role: RoleMap[msg.Role],
Content: contents[0].Text,
}
} else {
agentMessages[i] = openai.ChatCompletionMessage{
Role: RoleMap[msg.Role],
MultiContent: contents,
}
}
}
opt := openai.ChatCompletionRequest{
Model: config.GetModel(),
Messages: agentMessages,
MaxTokens: config.GetMaxTokens(),
Temperature: float32(config.GetTemperature()),
TopP: float32(config.GetTopP()),
}
for name := range config.GetTools() {
switch name {
case llm.ToolCodeInterpreter:
opt.Tools = append(opt.Tools, openai.Tool{Type: "code_interpreter"})
case llm.ToolWebSearch:
}
}
c := openai.NewClientWithConfig(openaiConf)
if callback != nil {
opt.Stream = true
// stream_options is only accepted together with stream=true
opt.StreamOptions = &openai.StreamOptions{
IncludeUsage: true,
}
t1 := time.Now().UnixMilli()
r, err := c.CreateChatCompletionStream(context.Background(), opt)
if err == nil {
results := make([]string, 0)
usage := llm.Usage{}
for {
if r2, err := r.Recv(); err == nil {
if r2.Choices != nil {
for _, ch := range r2.Choices {
text := ch.Delta.Content
callback(text)
results = append(results, text)
}
}
if r2.Usage != nil {
usage.AskTokens += int64(r2.Usage.PromptTokens)
usage.AnswerTokens += int64(r2.Usage.CompletionTokens)
usage.TotalTokens += int64(r2.Usage.TotalTokens)
}
} else {
break
}
}
_ = r.Close()
usage.UsedTime = time.Now().UnixMilli() - t1
return strings.Join(results, ""), usage, nil
} else {
log.DefaultLogger.Error(err.Error())
return "", llm.Usage{}, err
}
} else {
t1 := time.Now().UnixMilli()
if r, err := c.CreateChatCompletion(context.Background(), opt); err == nil {
t2 := time.Now().UnixMilli() - t1
results := make([]string, 0)
if r.Choices != nil {
for _, ch := range r.Choices {
results = append(results, ch.Message.Content)
}
}
return strings.Join(results, ""), llm.Usage{
AskTokens: int64(r.Usage.PromptTokens),
AnswerTokens: int64(r.Usage.CompletionTokens),
TotalTokens: int64(r.Usage.TotalTokens),
UsedTime: t2,
}, nil
} else {
//fmt.Println(u.BMagenta(err.Error()), u.BMagenta(u.JsonP(r)))
return "", llm.Usage{}, err
}
}
}
func (lm *LLM) FastEmbedding(text string) ([]byte, llm.Usage, error) {
return lm.Embedding(text, string(openai.AdaEmbeddingV2))
}
func (lm *LLM) BestEmbedding(text string) ([]byte, llm.Usage, error) {
return lm.Embedding(text, string(openai.LargeEmbedding3))
}
func (lm *LLM) Embedding(text, model string) ([]byte, llm.Usage, error) {
openaiConf := openai.DefaultConfig(lm.config.ApiKey)
if lm.config.Endpoint != "" {
openaiConf.BaseURL = lm.config.Endpoint
}
c := openai.NewClientWithConfig(openaiConf)
req := openai.EmbeddingRequest{
Input: text,
Model: openai.EmbeddingModel(model),
User: "",
EncodingFormat: "",
Dimensions: 0,
}
if lm.config.Debug {
fmt.Println(u.JsonP(req))
}
t1 := time.Now().UnixMilli()
if r, err := c.CreateEmbeddings(context.Background(), req); err == nil {
t2 := time.Now().UnixMilli() - t1
buf := new(bytes.Buffer)
if r.Data != nil {
for _, ch := range r.Data {
for _, v := range ch.Embedding {
_ = binary.Write(buf, binary.LittleEndian, v)
}
}
}
return buf.Bytes(), llm.Usage{
AskTokens: int64(r.Usage.PromptTokens),
AnswerTokens: int64(r.Usage.CompletionTokens),
TotalTokens: int64(r.Usage.TotalTokens),
UsedTime: t2,
}, nil
} else {
fmt.Println(err.Error())
return nil, llm.Usage{}, err
}
}

openai/config.go Normal file

@@ -0,0 +1,81 @@
package openai
import (
"apigo.cc/ai/llm/llm"
"github.com/sashabaranov/go-openai"
)
type LLM struct {
config llm.Config
}
var TypeMap = map[string]openai.ChatMessagePartType{
llm.TypeText: openai.ChatMessagePartTypeText,
llm.TypeImage: openai.ChatMessagePartTypeImageURL,
//llm.TypeVideo: "video_url",
}
var RoleMap = map[string]string{
llm.RoleSystem: openai.ChatMessageRoleSystem,
llm.RoleUser: openai.ChatMessageRoleUser,
llm.RoleAssistant: openai.ChatMessageRoleAssistant,
llm.RoleTool: openai.ChatMessageRoleTool,
}
const (
ModelGPT_4_32k_0613 = "gpt-4-32k-0613"
ModelGPT_4_32k_0314 = "gpt-4-32k-0314"
ModelGPT_4_32k = "gpt-4-32k"
ModelGPT_4_0613 = "gpt-4-0613"
ModelGPT_4_0314 = "gpt-4-0314"
ModelGPT_4o = "gpt-4o"
ModelGPT_4o_2024_05_13 = "gpt-4o-2024-05-13"
ModelGPT_4o_2024_08_06 = "gpt-4o-2024-08-06"
ModelGPT_4o_mini = "gpt-4o-mini"
ModelGPT_4o_mini_2024_07_18 = "gpt-4o-mini-2024-07-18"
ModelGPT_4_turbo = "gpt-4-turbo"
ModelGPT_4_turbo_2024_04_09 = "gpt-4-turbo-2024-04-09"
ModelGPT_4_0125_preview = "gpt-4-0125-preview"
ModelGPT_4_1106_preview = "gpt-4-1106-preview"
ModelGPT_4_turbo_preview = "gpt-4-turbo-preview"
ModelGPT_4_vision_preview = "gpt-4-vision-preview"
ModelGPT_4 = "gpt-4"
ModelGPT_3_5_turbo_0125 = "gpt-3.5-turbo-0125"
ModelGPT_3_5_turbo_1106 = "gpt-3.5-turbo-1106"
ModelGPT_3_5_turbo_0613 = "gpt-3.5-turbo-0613"
ModelGPT_3_5_turbo_0301 = "gpt-3.5-turbo-0301"
ModelGPT_3_5_turbo_16k = "gpt-3.5-turbo-16k"
ModelGPT_3_5_turbo_16k_0613 = "gpt-3.5-turbo-16k-0613"
ModelGPT_3_5_turbo = "gpt-3.5-turbo"
ModelGPT_3_5_turbo_instruct = "gpt-3.5-turbo-instruct"
ModelDavinci_002 = "davinci-002"
ModelCurie = "curie"
ModelCurie_002 = "curie-002"
ModelAda_002 = "ada-002"
ModelBabbage_002 = "babbage-002"
ModelCode_davinci_002 = "code-davinci-002"
ModelCode_cushman_001 = "code-cushman-001"
ModelCode_davinci_001 = "code-davinci-001"
ModelDallE2Std = "dall-e-2"
ModelDallE2HD = "dall-e-2-hd"
ModelDallE3Std = "dall-e-3"
ModelDallE3HD = "dall-e-3-hd"
)
func (ag *LLM) Support() llm.Support {
return llm.Support{
Ask: true,
AskWithImage: true,
AskWithVideo: false,
AskWithCodeInterpreter: true,
AskWithWebSearch: false,
MakeImage: true,
MakeVideo: false,
Models: []string{ModelGPT_4_32k_0613, ModelGPT_4_32k_0314, ModelGPT_4_32k, ModelGPT_4_0613, ModelGPT_4_0314, ModelGPT_4o, ModelGPT_4o_2024_05_13, ModelGPT_4o_2024_08_06, ModelGPT_4o_mini, ModelGPT_4o_mini_2024_07_18, ModelGPT_4_turbo, ModelGPT_4_turbo_2024_04_09, ModelGPT_4_0125_preview, ModelGPT_4_1106_preview, ModelGPT_4_turbo_preview, ModelGPT_4_vision_preview, ModelGPT_4, ModelGPT_3_5_turbo_0125, ModelGPT_3_5_turbo_1106, ModelGPT_3_5_turbo_0613, ModelGPT_3_5_turbo_0301, ModelGPT_3_5_turbo_16k, ModelGPT_3_5_turbo_16k_0613, ModelGPT_3_5_turbo, ModelGPT_3_5_turbo_instruct, ModelDavinci_002, ModelCurie, ModelCurie_002, ModelAda_002, ModelBabbage_002, ModelCode_davinci_002, ModelCode_cushman_001, ModelCode_davinci_001, ModelDallE2Std, ModelDallE2HD, ModelDallE3Std, ModelDallE3HD},
}
}
func init() {
llm.Register("openai", func(config llm.Config) llm.LLM {
return &LLM{config: config}
})
}

openai/gc.go Normal file

@@ -0,0 +1,83 @@
package openai
import (
"apigo.cc/ai/llm/llm"
"context"
"github.com/sashabaranov/go-openai"
"strings"
"time"
)
// func (lm *LLM) FastMakeImage(prompt, size, refImage string) ([]string, llm.Usage, error) {
// return lm.MakeImage(ModelDallE3Std, prompt, size, refImage)
// }
//
// func (lm *LLM) BestMakeImage(prompt, size, refImage string) ([]string, llm.Usage, error) {
// return lm.MakeImage(ModelDallE3HD, prompt, size, refImage)
// }
//
// func (lm *LLM) MakeImage(model, prompt, size, refImage string) ([]string, llm.Usage, error) {
func (lm *LLM) FastMakeImage(prompt string, config llm.GCConfig) ([]string, llm.Usage, error) {
config.Model = ModelDallE3Std
return lm.MakeImage(prompt, config)
}
func (lm *LLM) BestMakeImage(prompt string, config llm.GCConfig) ([]string, llm.Usage, error) {
config.Model = ModelDallE3HD
return lm.MakeImage(prompt, config)
}
func (lm *LLM) MakeImage(prompt string, config llm.GCConfig) ([]string, llm.Usage, error) {
openaiConf := openai.DefaultConfig(lm.config.ApiKey)
if lm.config.Endpoint != "" {
openaiConf.BaseURL = lm.config.Endpoint
}
config.SetDefault(&lm.config.GCConfig)
c := openai.NewClientWithConfig(openaiConf)
style := openai.CreateImageStyleVivid
if !strings.Contains(prompt, "vivid") && !strings.Contains(prompt, "生动的") && (strings.Contains(prompt, "natural") || strings.Contains(prompt, "自然的")) {
style = openai.CreateImageStyleNatural
}
quality := openai.CreateImageQualityStandard
model := config.GetModel()
if strings.HasSuffix(model, "-hd") {
quality = openai.CreateImageQualityHD
model = model[0 : len(model)-3]
}
t1 := time.Now().UnixMilli()
r, err := c.CreateImage(context.Background(), openai.ImageRequest{
Prompt: prompt,
Model: model,
Quality: quality,
Size: config.GetSize(),
Style: style,
ResponseFormat: openai.CreateImageResponseFormatURL,
})
t2 := time.Now().UnixMilli() - t1
if err == nil {
results := make([]string, 0)
for _, item := range r.Data {
results = append(results, item.URL)
}
return results, llm.Usage{
AskTokens: 0,
AnswerTokens: 0,
TotalTokens: 0,
UsedTime: t2,
}, nil
} else {
return nil, llm.Usage{}, err
}
}
func (lm *LLM) FastMakeVideo(prompt string, config llm.GCConfig) ([]string, []string, llm.Usage, error) {
return lm.MakeVideo(prompt, config)
}
func (lm *LLM) BestMakeVideo(prompt string, config llm.GCConfig) ([]string, []string, llm.Usage, error) {
return lm.MakeVideo(prompt, config)
}
// Video generation is not supported for OpenAI (Support reports MakeVideo: false); these methods return empty results.
func (lm *LLM) MakeVideo(prompt string, config llm.GCConfig) ([]string, []string, llm.Usage, error) {
return nil, nil, llm.Usage{}, nil
}

test.js Normal file

@@ -0,0 +1,4 @@
import {zhipu, openai} from "llm"
import co from "console"
return zhipu.fastAsk('你好', co.print)

zhipu/chat.go Normal file

@@ -0,0 +1,195 @@
package zhipu
import (
"apigo.cc/ai/llm/llm"
"bytes"
"context"
"encoding/binary"
"fmt"
"github.com/ssgo/u"
"github.com/yankeguo/zhipu"
"strings"
"time"
)
func (lm *LLM) FastAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
return lm.Ask(messages, llm.ChatConfig{
Model: ModelGLM4Flash,
}, callback)
}
func (lm *LLM) LongAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
return lm.Ask(messages, llm.ChatConfig{
Model: ModelGLM4Long,
}, callback)
}
func (lm *LLM) BatterAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
return lm.Ask(messages, llm.ChatConfig{
Model: ModelGLM4Plus,
}, callback)
}
func (lm *LLM) BestAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
return lm.Ask(messages, llm.ChatConfig{
Model: ModelGLM40520,
}, callback)
}
func (lm *LLM) MultiAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
return lm.Ask(messages, llm.ChatConfig{
Model: ModelGLM4VPlus,
}, callback)
}
func (lm *LLM) BestMultiAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
return lm.Ask(messages, llm.ChatConfig{
Model: ModelGLM4V,
}, callback)
}
func (lm *LLM) CodeInterpreterAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
return lm.Ask(messages, llm.ChatConfig{
Model: ModelGLM4AllTools,
Tools: map[string]any{llm.ToolCodeInterpreter: nil},
}, callback)
}
func (lm *LLM) WebSearchAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
return lm.Ask(messages, llm.ChatConfig{
Model: ModelGLM4AllTools,
Tools: map[string]any{llm.ToolWebSearch: nil},
}, callback)
}
func (lm *LLM) Ask(messages []llm.ChatMessage, config llm.ChatConfig, callback func(answer string)) (string, llm.Usage, error) {
config.SetDefault(&lm.config.ChatConfig)
c, err := zhipu.NewClient(zhipu.WithAPIKey(lm.config.ApiKey), zhipu.WithBaseURL(lm.config.Endpoint))
if err != nil {
return "", llm.Usage{}, err
}
cc := c.ChatCompletion(config.GetModel())
for _, msg := range messages {
var contents []zhipu.ChatCompletionMultiContent
if msg.Contents != nil {
contents = make([]zhipu.ChatCompletionMultiContent, len(msg.Contents))
for j, inPart := range msg.Contents {
part := zhipu.ChatCompletionMultiContent{}
part.Type = NameMap[inPart.Type]
switch inPart.Type {
case llm.TypeText:
part.Text = inPart.Content
case llm.TypeImage:
part.ImageURL = &zhipu.URLItem{URL: inPart.Content}
//case llm.TypeVideo:
// part.VideoURL = &zhipu.URLItem{URL: inPart.Content}
}
contents[j] = part
}
}
if len(contents) == 1 && contents[0].Type == llm.TypeText {
cc.AddMessage(zhipu.ChatCompletionMessage{
Role: NameMap[msg.Role],
Content: contents[0].Text,
})
} else {
cc.AddMessage(zhipu.ChatCompletionMultiMessage{
Role: NameMap[msg.Role],
Content: contents,
})
}
}
for name := range config.GetTools() {
switch name {
case llm.ToolCodeInterpreter:
cc.AddTool(zhipu.ChatCompletionToolCodeInterpreter{})
case llm.ToolWebSearch:
cc.AddTool(zhipu.ChatCompletionToolWebBrowser{})
}
}
if config.GetMaxTokens() != 0 {
cc.SetMaxTokens(config.GetMaxTokens())
}
if config.GetTemperature() != 0 {
cc.SetTemperature(config.GetTemperature())
}
if config.GetTopP() != 0 {
cc.SetTopP(config.GetTopP())
}
if callback != nil {
cc.SetStreamHandler(func(r2 zhipu.ChatCompletionResponse) error {
if r2.Choices != nil {
for _, ch := range r2.Choices {
text := ch.Delta.Content
callback(text)
}
}
return nil
})
}
if lm.config.Debug {
fmt.Println(cc.BatchMethod(), cc.BatchURL())
fmt.Println(u.JsonP(cc.BatchBody()))
}
t1 := time.Now().UnixMilli()
if r, err := cc.Do(context.Background()); err == nil {
t2 := time.Now().UnixMilli() - t1
results := make([]string, 0)
if r.Choices != nil {
for _, ch := range r.Choices {
results = append(results, ch.Message.Content)
}
}
return strings.Join(results, ""), llm.Usage{
AskTokens: r.Usage.PromptTokens,
AnswerTokens: r.Usage.CompletionTokens,
TotalTokens: r.Usage.TotalTokens,
UsedTime: t2,
}, nil
} else {
return "", llm.Usage{}, err
}
}
func (lm *LLM) FastEmbedding(text string) ([]byte, llm.Usage, error) {
return lm.Embedding(text, ModelEmbedding3)
}
func (lm *LLM) BestEmbedding(text string) ([]byte, llm.Usage, error) {
return lm.Embedding(text, ModelEmbedding3)
}
func (lm *LLM) Embedding(text, model string) ([]byte, llm.Usage, error) {
c, err := zhipu.NewClient(zhipu.WithAPIKey(lm.config.ApiKey), zhipu.WithBaseURL(lm.config.Endpoint))
if err != nil {
return nil, llm.Usage{}, err
}
cc := c.Embedding(model)
cc.SetInput(text)
t1 := time.Now().UnixMilli()
if r, err := cc.Do(context.Background()); err == nil {
t2 := time.Now().UnixMilli() - t1
buf := new(bytes.Buffer)
if r.Data != nil {
for _, ch := range r.Data {
for _, v := range ch.Embedding {
_ = binary.Write(buf, binary.LittleEndian, float32(v))
}
}
}
return buf.Bytes(), llm.Usage{
AskTokens: r.Usage.PromptTokens,
AnswerTokens: r.Usage.CompletionTokens,
TotalTokens: r.Usage.TotalTokens,
UsedTime: t2,
}, nil
} else {
return nil, llm.Usage{}, err
}
}

zhipu/config.go Normal file

@@ -0,0 +1,60 @@
package zhipu
import (
"apigo.cc/ai/llm/llm"
"github.com/yankeguo/zhipu"
)
type LLM struct {
config llm.Config
}
var NameMap = map[string]string{
llm.TypeText: zhipu.MultiContentTypeText,
llm.TypeImage: zhipu.MultiContentTypeImageURL,
//llm.TypeVideo: zhipu.MultiContentTypeVideoURL,
llm.RoleSystem: zhipu.RoleSystem,
llm.RoleUser: zhipu.RoleUser,
llm.RoleAssistant: zhipu.RoleAssistant,
llm.RoleTool: zhipu.RoleTool,
}
const (
ModelGLM4Plus = "GLM-4-Plus"
ModelGLM40520 = "GLM-4-0520"
ModelGLM4Long = "GLM-4-Long"
ModelGLM4AirX = "GLM-4-AirX"
ModelGLM4Air = "GLM-4-Air"
ModelGLM4Flash = "GLM-4-Flash"
ModelGLM4AllTools = "GLM-4-AllTools"
ModelGLM4 = "GLM-4"
ModelGLM4VPlus = "GLM-4V-Plus"
ModelGLM4V = "GLM-4V"
ModelCogVideoX = "CogVideoX"
ModelCogView3Plus = "CogView-3-Plus"
ModelCogView3 = "CogView-3"
ModelEmbedding3 = "Embedding-3"
ModelEmbedding2 = "Embedding-2"
ModelCharGLM3 = "CharGLM-3"
ModelEmohaa = "Emohaa"
ModelCodeGeeX4 = "CodeGeeX-4"
)
func (lm *LLM) Support() llm.Support {
return llm.Support{
Ask: true,
AskWithImage: true,
AskWithVideo: true,
AskWithCodeInterpreter: true,
AskWithWebSearch: true,
MakeImage: true,
MakeVideo: true,
Models: []string{ModelGLM4Plus, ModelGLM40520, ModelGLM4Long, ModelGLM4AirX, ModelGLM4Air, ModelGLM4Flash, ModelGLM4AllTools, ModelGLM4, ModelGLM4VPlus, ModelGLM4V, ModelCogVideoX, ModelCogView3Plus, ModelCogView3, ModelEmbedding3, ModelEmbedding2, ModelCharGLM3, ModelEmohaa, ModelCodeGeeX4},
}
}
func init() {
llm.Register("zhipu", func(config llm.Config) llm.LLM {
return &LLM{config: config}
})
}

zhipu/gc.go Normal file

@@ -0,0 +1,94 @@
package zhipu
import (
"apigo.cc/ai/llm/llm"
"context"
"errors"
"github.com/yankeguo/zhipu"
"time"
)
func (lm *LLM) FastMakeImage(prompt string, config llm.GCConfig) ([]string, llm.Usage, error) {
config.Model = ModelCogView3Plus
return lm.MakeImage(prompt, config)
}
func (lm *LLM) BestMakeImage(prompt string, config llm.GCConfig) ([]string, llm.Usage, error) {
config.Model = ModelCogView3
return lm.MakeImage(prompt, config)
}
func (lm *LLM) MakeImage(prompt string, config llm.GCConfig) ([]string, llm.Usage, error) {
c, err := zhipu.NewClient(zhipu.WithAPIKey(lm.config.ApiKey), zhipu.WithBaseURL(lm.config.Endpoint))
if err != nil {
return nil, llm.Usage{}, err
}
config.SetDefault(&lm.config.GCConfig)
cc := c.ImageGeneration(config.GetModel()).SetPrompt(prompt)
//cc.SetSize(config.GetSize())
t1 := time.Now().UnixMilli()
if r, err := cc.Do(context.Background()); err == nil {
t2 := time.Now().UnixMilli() - t1
results := make([]string, 0)
for _, item := range r.Data {
results = append(results, item.URL)
}
return results, llm.Usage{
UsedTime: t2,
}, nil
} else {
return nil, llm.Usage{}, err
}
}
func (lm *LLM) FastMakeVideo(prompt string, config llm.GCConfig) ([]string, []string, llm.Usage, error) {
config.Model = ModelCogVideoX
return lm.MakeVideo(prompt, config)
}
func (lm *LLM) BestMakeVideo(prompt string, config llm.GCConfig) ([]string, []string, llm.Usage, error) {
config.Model = ModelCogVideoX
return lm.MakeVideo(prompt, config)
}
func (lm *LLM) MakeVideo(prompt string, config llm.GCConfig) ([]string, []string, llm.Usage, error) {
c, err := zhipu.NewClient(zhipu.WithAPIKey(lm.config.ApiKey), zhipu.WithBaseURL(lm.config.Endpoint))
if err != nil {
return nil, nil, llm.Usage{}, err
}
config.SetDefault(&lm.config.GCConfig)
cc := c.VideoGeneration(config.GetModel()).SetPrompt(prompt)
cc.SetImageURL(config.GetRef())
t1 := time.Now().UnixMilli()
if resp, err := cc.Do(context.Background()); err == nil {
// poll the async task every 3 seconds, for up to an hour
for i := 0; i < 1200; i++ {
r, err := c.AsyncResult(resp.ID).Do(context.Background())
if err != nil {
return nil, nil, llm.Usage{}, err
}
if r.TaskStatus == zhipu.VideoGenerationTaskStatusSuccess {
covers := make([]string, 0)
results := make([]string, 0)
for _, item := range r.VideoResult {
results = append(results, item.URL)
covers = append(covers, item.CoverImageURL)
}
return results, covers, llm.Usage{
UsedTime: time.Now().UnixMilli() - t1,
}, nil
}
if r.TaskStatus == zhipu.VideoGenerationTaskStatusFail {
return nil, nil, llm.Usage{}, errors.New("fail on task " + resp.ID)
}
time.Sleep(3 * time.Second)
}
return nil, nil, llm.Usage{}, errors.New("timeout on task " + resp.ID)
} else {
return nil, nil, llm.Usage{}, err
}
}