split zhipu and openai
parent 0f8d3a157f
commit 2931239ee4

go.mod (29 changed lines)
@@ -1,39 +1,24 @@
 module apigo.cc/ai/llm

-go 1.18
+go 1.22
+
+toolchain go1.22.5

 require (
-	apigo.cc/gojs v0.0.1
+	apigo.cc/gojs v0.0.3
 	apigo.cc/gojs/console v0.0.1
 	apigo.cc/gojs/db v0.0.1
 	apigo.cc/gojs/file v0.0.1
 	apigo.cc/gojs/http v0.0.1
 	apigo.cc/gojs/log v0.0.1
 	apigo.cc/gojs/util v0.0.2
-	github.com/sashabaranov/go-openai v1.32.0
-	github.com/ssgo/config v1.7.7
-	github.com/ssgo/log v1.7.7
+	github.com/ssgo/config v1.7.8
 	github.com/ssgo/u v1.7.9
-	github.com/yankeguo/zhipu v0.1.2
 )

 require (
 	filippo.io/edwards25519 v1.1.0 // indirect
 	github.com/dlclark/regexp2 v1.11.4 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
 	github.com/go-resty/resty/v2 v2.14.0 // indirect
 	github.com/go-sourcemap/sourcemap v2.1.4+incompatible // indirect
 	github.com/go-sql-driver/mysql v1.8.1 // indirect
 	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
-	github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd // indirect
+	github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect
 	github.com/gorilla/websocket v1.5.3 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/ssgo/dao v0.1.5 // indirect
 	github.com/ssgo/db v1.7.9 // indirect
 	github.com/ssgo/httpclient v1.7.8 // indirect
+	github.com/ssgo/log v1.7.7 // indirect
 	github.com/ssgo/standard v1.7.7 // indirect
 	github.com/ssgo/tool v0.4.27 // indirect
 	golang.org/x/net v0.30.0 // indirect
 	golang.org/x/sys v0.26.0 // indirect
 	golang.org/x/text v0.19.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
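Dropping github.com/sashabaranov/go-openai and github.com/yankeguo/zhipu from the requires reflects the split: each provider now lives in its own module and registers itself on import. A hedged sketch of a consumer after the split — apigo.cc/ai/zhipu is the path named in the test change below, while the openai path is an assumption:

package main

// Hypothetical consumer after the split: the core module keeps the llm
// registry, and each provider registers itself via init() on blank import.
import (
	_ "apigo.cc/ai/openai" // assumed path for the split-out OpenAI provider
	_ "apigo.cc/ai/zhipu"  // path taken from the test-file comment below
)

func main() {
	// ask/embedding calls would go through the core llm API here
}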

@@ -1,89 +0,0 @@
package main

import (
	"fmt"
	"os"

	_ "apigo.cc/ai/llm"
	_ "apigo.cc/ai/llm/openai"
	_ "apigo.cc/ai/llm/zhipu"
	"apigo.cc/gojs"
	_ "apigo.cc/gojs/console"
	_ "apigo.cc/gojs/db"
	_ "apigo.cc/gojs/file"
	_ "apigo.cc/gojs/http"
	_ "apigo.cc/gojs/log"
	_ "apigo.cc/gojs/util"
	"github.com/ssgo/u"
)

func init() {
	gojs.Alias("llm", "apigo.cc/ai/llm")
	gojs.Alias("console", "apigo.cc/gojs/console")
	gojs.Alias("db", "apigo.cc/gojs/db")
	gojs.Alias("file", "apigo.cc/gojs/file")
	gojs.Alias("http", "apigo.cc/gojs/http")
	gojs.Alias("log", "apigo.cc/gojs/log")
	gojs.Alias("util", "apigo.cc/gojs/util")
}

func main() {
	args := os.Args[1:]

	if len(args) > 0 && (args[0] == "-e" || args[0] == "export") {
		imports := gojs.ExportForDev()
		fmt.Println(`exported to node_modules

test.js:
`)
		fmt.Println(u.Cyan(imports + `

function main(...args) {
	let r = llm.zhipu.fastAsk(args.length>0?args[0]:'你好', console.print)
	console.println()
	return r
}

run:

llm-cli test.js 你是谁

`))
		return
	}

	isWatch := false
	if len(args) > 0 && args[0] == "-w" {
		isWatch = true
		args = args[1:]
	}

	jsFile := ""
	if len(args) > 0 {
		jsFile = args[0]
		args = args[1:]
	}

	if jsFile != "" && u.FileExists(jsFile) {
		if isWatch {
			if w, err := gojs.WatchRun(jsFile, nil, nil, u.ToInterfaceArray(args)...); err == nil {
				w.WaitForKill()
			} else {
				fmt.Println(u.BRed(err.Error()))
			}
		} else {
			if _, err := gojs.RunFile(jsFile, u.ToInterfaceArray(args)...); err != nil {
				fmt.Println(u.BRed(err.Error()))
			}
		}
		return
	}

	fmt.Println(`Usage:
llm-cli -h | help		show usage
llm-cli -e | export		export ai.ts file for develop
llm-cli test.js			run test.js, if not specified, run ai.js
llm-cli -w | watch test.js	run test.js, if .js files changed will be reloaded
`)
	return
}
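Stripped of flag handling, the deleted CLI is a thin wrapper over gojs. A minimal embedding sketch using only the calls shown above (Alias to name a module for scripts, RunFile to execute one); the script filename is a placeholder:

package main

import (
	"fmt"

	_ "apigo.cc/ai/llm" // registers the llm module for gojs
	"apigo.cc/gojs"
	_ "apigo.cc/gojs/console"
)

func main() {
	// expose Go-side modules to scripts under short names
	gojs.Alias("llm", "apigo.cc/ai/llm")
	gojs.Alias("console", "apigo.cc/gojs/console")

	// run a script; extra varargs (omitted here) become script arguments
	if _, err := gojs.RunFile("test.js"); err != nil {
		fmt.Println(err)
	}
}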

@@ -4,7 +4,8 @@ import (
 	"fmt"
 	"testing"

-	_ "apigo.cc/ai/llm/zhipu"
+	// uncomment apigo.cc/ai/zhipu for test
+	// _ "apigo.cc/ai/zhipu"
 	"apigo.cc/gojs"
 	_ "apigo.cc/gojs/console"
 	"github.com/ssgo/u"
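After this change the test no longer hard-wires the in-repo zhipu package; exercising the provider becomes an explicit opt-in. What the enabled form would look like (package name and test body are assumptions; the module path comes from the comment above):

package llm_test // assumed test package name

import (
	"testing"

	_ "apigo.cc/ai/zhipu" // opt in: the provider registers itself in init()
)

func TestZhipu(t *testing.T) { /* provider-backed assertions would go here */ }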

openai/chat.go (230 deleted lines)

@@ -1,230 +0,0 @@
package openai

import (
	"apigo.cc/ai/llm/llm"
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"github.com/sashabaranov/go-openai"
	"github.com/ssgo/log"
	"github.com/ssgo/u"
	"strings"
	"time"
)

func (lm *LLM) FastAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
	return lm.Ask(messages, llm.ChatConfig{
		Model: ModelGPT_4o_mini_2024_07_18,
	}, callback)
}

func (lm *LLM) LongAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
	return lm.Ask(messages, llm.ChatConfig{
		Model: ModelGPT_4_32k_0613,
	}, callback)
}

func (lm *LLM) BatterAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
	return lm.Ask(messages, llm.ChatConfig{
		Model: ModelGPT_4_turbo,
	}, callback)
}

func (lm *LLM) BestAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
	return lm.Ask(messages, llm.ChatConfig{
		Model: ModelGPT_4o_2024_08_06,
	}, callback)
}

func (lm *LLM) MultiAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
	return lm.Ask(messages, llm.ChatConfig{
		Model: ModelGPT_4o_mini_2024_07_18,
	}, callback)
}

func (lm *LLM) BestMultiAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
	return lm.Ask(messages, llm.ChatConfig{
		Model: ModelGPT_4o_2024_08_06,
	}, callback)
}

func (lm *LLM) CodeInterpreterAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
	return lm.Ask(messages, llm.ChatConfig{
		Model: ModelGPT_4o,
		Tools: map[string]any{llm.ToolCodeInterpreter: nil},
	}, callback)
}

func (lm *LLM) WebSearchAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
	return lm.Ask(messages, llm.ChatConfig{
		Model: ModelGPT_4o_mini_2024_07_18,
		Tools: map[string]any{llm.ToolWebSearch: nil},
	}, callback)
}

func (lm *LLM) Ask(messages []llm.ChatMessage, config llm.ChatConfig, callback func(answer string)) (string, llm.Usage, error) {
	openaiConf := openai.DefaultConfig(lm.config.ApiKey)
	if lm.config.Endpoint != "" {
		openaiConf.BaseURL = lm.config.Endpoint
	}

	config.SetDefault(&lm.config.ChatConfig)

	agentMessages := make([]openai.ChatCompletionMessage, len(messages))
	for i, msg := range messages {
		var contents []openai.ChatMessagePart
		if msg.Contents != nil {
			contents = make([]openai.ChatMessagePart, len(msg.Contents))
			for j, inPart := range msg.Contents {
				part := openai.ChatMessagePart{}
				part.Type = TypeMap[inPart.Type]
				switch inPart.Type {
				case llm.TypeText:
					part.Text = inPart.Content
				case llm.TypeImage:
					part.ImageURL = &openai.ChatMessageImageURL{
						URL:    inPart.Content,
						Detail: openai.ImageURLDetailAuto,
					}
				}
				contents[j] = part
			}
		}
		if len(contents) == 1 && contents[0].Type == llm.TypeText {
			agentMessages[i] = openai.ChatCompletionMessage{
				Role:    RoleMap[msg.Role],
				Content: contents[0].Text,
			}
		} else {
			agentMessages[i] = openai.ChatCompletionMessage{
				Role:         RoleMap[msg.Role],
				MultiContent: contents,
			}
		}
	}

	opt := openai.ChatCompletionRequest{
		Model:       config.GetModel(),
		Messages:    agentMessages,
		MaxTokens:   config.GetMaxTokens(),
		Temperature: float32(config.GetTemperature()),
		TopP:        float32(config.GetTopP()),
		StreamOptions: &openai.StreamOptions{
			IncludeUsage: true,
		},
	}

	for name := range config.GetTools() {
		switch name {
		case llm.ToolCodeInterpreter:
			opt.Tools = append(opt.Tools, openai.Tool{Type: "code_interpreter"})
		case llm.ToolWebSearch:
		}
	}

	c := openai.NewClientWithConfig(openaiConf)
	if callback != nil {
		opt.Stream = true
		r, err := c.CreateChatCompletionStream(context.Background(), opt)
		if err == nil {
			results := make([]string, 0)
			usage := llm.Usage{}
			for {
				if r2, err := r.Recv(); err == nil {
					if r2.Choices != nil {
						for _, ch := range r2.Choices {
							text := ch.Delta.Content
							callback(text)
							results = append(results, text)
						}
					}
					if r2.Usage != nil {
						usage.AskTokens += int64(r2.Usage.PromptTokens)
						usage.AnswerTokens += int64(r2.Usage.CompletionTokens)
						usage.TotalTokens += int64(r2.Usage.TotalTokens)
					}
				} else {
					break
				}
			}
			_ = r.Close()
			return strings.Join(results, ""), usage, nil
		} else {
			log.DefaultLogger.Error(err.Error())
			return "", llm.Usage{}, err
		}
	} else {
		t1 := time.Now().UnixMilli()
		if r, err := c.CreateChatCompletion(context.Background(), opt); err == nil {
			t2 := time.Now().UnixMilli() - t1
			results := make([]string, 0)
			if r.Choices != nil {
				for _, ch := range r.Choices {
					results = append(results, ch.Message.Content)
				}
			}
			return strings.Join(results, ""), llm.Usage{
				AskTokens:    int64(r.Usage.PromptTokens),
				AnswerTokens: int64(r.Usage.CompletionTokens),
				TotalTokens:  int64(r.Usage.TotalTokens),
				UsedTime:     t2,
			}, nil
		} else {
			//fmt.Println(u.BMagenta(err.Error()), u.BMagenta(u.JsonP(r)))
			return "", llm.Usage{}, err
		}
	}
}

func (lm *LLM) FastEmbedding(text string) ([]byte, llm.Usage, error) {
	return lm.Embedding(text, string(openai.AdaEmbeddingV2))
}

func (lm *LLM) BestEmbedding(text string) ([]byte, llm.Usage, error) {
	return lm.Embedding(text, string(openai.LargeEmbedding3))
}

func (lm *LLM) Embedding(text, model string) ([]byte, llm.Usage, error) {
	fmt.Println(111, model, text)
	openaiConf := openai.DefaultConfig(lm.config.ApiKey)
	if lm.config.Endpoint != "" {
		openaiConf.BaseURL = lm.config.Endpoint
	}

	c := openai.NewClientWithConfig(openaiConf)
	req := openai.EmbeddingRequest{
		Input:          text,
		Model:          openai.EmbeddingModel(model),
		User:           "",
		EncodingFormat: "",
		Dimensions:     0,
	}

	if lm.config.Debug {
		fmt.Println(u.JsonP(req))
	}

	t1 := time.Now().UnixMilli()
	if r, err := c.CreateEmbeddings(context.Background(), req); err == nil {
		t2 := time.Now().UnixMilli() - t1
		buf := new(bytes.Buffer)
		if r.Data != nil {
			for _, ch := range r.Data {
				for _, v := range ch.Embedding {
					_ = binary.Write(buf, binary.LittleEndian, v)
				}
			}
		}
		fmt.Println(len(buf.Bytes()))
		return buf.Bytes(), llm.Usage{
			AskTokens:    int64(r.Usage.PromptTokens),
			AnswerTokens: int64(r.Usage.CompletionTokens),
			TotalTokens:  int64(r.Usage.TotalTokens),
			UsedTime:     t2,
		}, nil
	} else {
		fmt.Println(err.Error())
		return nil, llm.Usage{}, err
	}
}
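For reference, the streaming branch above follows the standard go-openai pattern: set Stream, then Recv() until io.EOF. A self-contained sketch against the public github.com/sashabaranov/go-openai API (key and model string are placeholders):

package main

import (
	"context"
	"errors"
	"fmt"
	"io"

	"github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("YOUR_API_KEY") // placeholder
	stream, err := c.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
		Model:  "gpt-4o-mini",
		Stream: true,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "hello"},
		},
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	defer stream.Close()
	for {
		r, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			break // stream finished
		}
		if err != nil {
			fmt.Println(err)
			return
		}
		for _, ch := range r.Choices {
			fmt.Print(ch.Delta.Content) // incremental tokens
		}
	}
}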

@@ -1,81 +0,0 @@
package openai

import (
	"apigo.cc/ai/llm/llm"
	"github.com/sashabaranov/go-openai"
)

type LLM struct {
	config llm.Config
}

var TypeMap = map[string]openai.ChatMessagePartType{
	llm.TypeText:  openai.ChatMessagePartTypeText,
	llm.TypeImage: openai.ChatMessagePartTypeImageURL,
	//llm.TypeVideo: "video_url",
}

var RoleMap = map[string]string{
	llm.RoleSystem:    openai.ChatMessageRoleSystem,
	llm.RoleUser:      openai.ChatMessageRoleUser,
	llm.RoleAssistant: openai.ChatMessageRoleAssistant,
	llm.RoleTool:      openai.ChatMessageRoleTool,
}

const (
	ModelGPT_4_32k_0613         = "gpt-4-32k-0613"
	ModelGPT_4_32k_0314         = "gpt-4-32k-0314"
	ModelGPT_4_32k              = "gpt-4-32k"
	ModelGPT_4_0613             = "gpt-4-0613"
	ModelGPT_4_0314             = "gpt-4-0314"
	ModelGPT_4o                 = "gpt-4o"
	ModelGPT_4o_2024_05_13      = "gpt-4o-2024-05-13"
	ModelGPT_4o_2024_08_06      = "gpt-4o-2024-08-06"
	ModelGPT_4o_mini            = "gpt-4o-mini"
	ModelGPT_4o_mini_2024_07_18 = "gpt-4o-mini-2024-07-18"
	ModelGPT_4_turbo            = "gpt-4-turbo"
	ModelGPT_4_turbo_2024_04_09 = "gpt-4-turbo-2024-04-09"
	ModelGPT_4_0125_preview     = "gpt-4-0125-preview"
	ModelGPT_4_1106_preview     = "gpt-4-1106-preview"
	ModelGPT_4_turbo_preview    = "gpt-4-turbo-preview"
	ModelGPT_4_vision_preview   = "gpt-4-vision-preview"
	ModelGPT_4                  = "gpt-4"
	ModelGPT_3_5_turbo_0125     = "gpt-3.5-turbo-0125"
	ModelGPT_3_5_turbo_1106     = "gpt-3.5-turbo-1106"
	ModelGPT_3_5_turbo_0613     = "gpt-3.5-turbo-0613"
	ModelGPT_3_5_turbo_0301     = "gpt-3.5-turbo-0301"
	ModelGPT_3_5_turbo_16k      = "gpt-3.5-turbo-16k"
	ModelGPT_3_5_turbo_16k_0613 = "gpt-3.5-turbo-16k-0613"
	ModelGPT_3_5_turbo          = "gpt-3.5-turbo"
	ModelGPT_3_5_turbo_instruct = "gpt-3.5-turbo-instruct"
	ModelDavinci_002            = "davinci-002"
	ModelCurie                  = "curie"
	ModelCurie_002              = "curie-002"
	ModelAda_002                = "ada-002"
	ModelBabbage_002            = "babbage-002"
	ModelCode_davinci_002       = "code-davinci-002"
	ModelCode_cushman_001       = "code-cushman-001"
	ModelCode_davinci_001       = "code-davinci-001"
	ModelDallE2Std              = "dall-e-2"
	ModelDallE2HD               = "dall-e-2-hd"
	ModelDallE3Std              = "dall-e-3"
	ModelDallE3HD               = "dall-e-3-hd"
)

func (ag *LLM) Support() llm.Support {
	return llm.Support{
		Ask:                    true,
		AskWithImage:           true,
		AskWithVideo:           false,
		AskWithCodeInterpreter: true,
		AskWithWebSearch:       false,
		MakeImage:              true,
		MakeVideo:              false,
		Models:                 []string{ModelGPT_4_32k_0613, ModelGPT_4_32k_0314, ModelGPT_4_32k, ModelGPT_4_0613, ModelGPT_4_0314, ModelGPT_4o, ModelGPT_4o_2024_05_13, ModelGPT_4o_2024_08_06, ModelGPT_4o_mini, ModelGPT_4o_mini_2024_07_18, ModelGPT_4_turbo, ModelGPT_4_turbo_2024_04_09, ModelGPT_4_0125_preview, ModelGPT_4_1106_preview, ModelGPT_4_turbo_preview, ModelGPT_4_vision_preview, ModelGPT_4, ModelGPT_3_5_turbo_0125, ModelGPT_3_5_turbo_1106, ModelGPT_3_5_turbo_0613, ModelGPT_3_5_turbo_0301, ModelGPT_3_5_turbo_16k, ModelGPT_3_5_turbo_16k_0613, ModelGPT_3_5_turbo, ModelGPT_3_5_turbo_instruct, ModelDavinci_002, ModelCurie, ModelCurie_002, ModelAda_002, ModelBabbage_002, ModelCode_davinci_002, ModelCode_cushman_001, ModelCode_davinci_001, ModelDallE2Std, ModelDallE2HD, ModelDallE3Std, ModelDallE3HD},
	}
}

func init() {
	llm.Register("openai", func(config llm.Config) llm.LLM {
		return &LLM{config: config}
	})
}
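Support() advertises per-provider capabilities (OpenAI here reports no video input, no web search, no video generation), so callers can presumably gate features on it. A hedged illustration with an invented helper name, written as if it sat in the same package:

package openai

import "fmt"

// PrintSupport is illustrative only (not part of the deleted file): it
// shows how a caller could check the Support() report before using a feature.
func PrintSupport() {
	sup := (&LLM{}).Support()
	fmt.Println("image input:", sup.AskWithImage, "make video:", sup.MakeVideo)
}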

openai/gc.go (83 deleted lines)

@@ -1,83 +0,0 @@
package openai

import (
	"apigo.cc/ai/llm/llm"
	"context"
	"github.com/sashabaranov/go-openai"
	"strings"
	"time"
)

//	func (lm *LLM) FastMakeImage(prompt, size, refImage string) ([]string, llm.Usage, error) {
//		return lm.MakeImage(ModelDallE3Std, prompt, size, refImage)
//	}
//
//	func (lm *LLM) BestMakeImage(prompt, size, refImage string) ([]string, llm.Usage, error) {
//		return lm.MakeImage(ModelDallE3HD, prompt, size, refImage)
//	}
//
// func (lm *LLM) MakeImage(model, prompt, size, refImage string) ([]string, llm.Usage, error) {
func (lm *LLM) FastMakeImage(prompt string, config llm.GCConfig) ([]string, llm.Usage, error) {
	config.Model = ModelDallE3Std
	return lm.MakeImage(prompt, config)
}

func (lm *LLM) BestMakeImage(prompt string, config llm.GCConfig) ([]string, llm.Usage, error) {
	config.Model = ModelDallE3HD
	return lm.MakeImage(prompt, config)
}

func (lm *LLM) MakeImage(prompt string, config llm.GCConfig) ([]string, llm.Usage, error) {
	openaiConf := openai.DefaultConfig(lm.config.ApiKey)
	if lm.config.Endpoint != "" {
		openaiConf.BaseURL = lm.config.Endpoint
	}
	config.SetDefault(&lm.config.GCConfig)
	c := openai.NewClientWithConfig(openaiConf)
	style := openai.CreateImageStyleVivid
	if (!strings.Contains(prompt, "vivid") || !strings.Contains(prompt, "生动的")) && (strings.Contains(prompt, "natural") || strings.Contains(prompt, "自然的")) {
		style = openai.CreateImageStyleNatural
	}
	quality := openai.CreateImageQualityStandard
	model := config.GetModel()
	if strings.HasSuffix(model, "-hd") {
		quality = openai.CreateImageQualityHD
		model = model[0 : len(model)-3]
	}
	t1 := time.Now().UnixMilli()
	r, err := c.CreateImage(context.Background(), openai.ImageRequest{
		Prompt:         prompt,
		Model:          model,
		Quality:        quality,
		Size:           config.GetSize(),
		Style:          style,
		ResponseFormat: openai.CreateImageResponseFormatURL,
	})
	t2 := time.Now().UnixMilli() - t1
	if err == nil {
		results := make([]string, 0)
		for _, item := range r.Data {
			results = append(results, item.URL)
		}
		return results, llm.Usage{
			AskTokens:    0,
			AnswerTokens: 0,
			TotalTokens:  0,
			UsedTime:     t2,
		}, nil
	} else {
		return nil, llm.Usage{}, err
	}
}

func (lm *LLM) FastMakeVideo(prompt string, config llm.GCConfig) ([]string, []string, llm.Usage, error) {
	return lm.MakeVideo(prompt, config)
}

func (lm *LLM) BestMakeVideo(prompt string, config llm.GCConfig) ([]string, []string, llm.Usage, error) {
	return lm.MakeVideo(prompt, config)
}

func (lm *LLM) MakeVideo(prompt string, config llm.GCConfig) ([]string, []string, llm.Usage, error) {
	return nil, nil, llm.Usage{}, nil
}

zhipu/chat.go (195 deleted lines)

@@ -1,195 +0,0 @@
package zhipu

import (
	"apigo.cc/ai/llm/llm"
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"github.com/ssgo/u"
	"github.com/yankeguo/zhipu"
	"strings"
	"time"
)

func (lm *LLM) FastAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
	return lm.Ask(messages, llm.ChatConfig{
		Model: ModelGLM4Flash,
	}, callback)
}

func (lm *LLM) LongAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
	return lm.Ask(messages, llm.ChatConfig{
		Model: ModelGLM4Long,
	}, callback)
}

func (lm *LLM) BatterAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
	return lm.Ask(messages, llm.ChatConfig{
		Model: ModelGLM4Plus,
	}, callback)
}

func (lm *LLM) BestAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
	return lm.Ask(messages, llm.ChatConfig{
		Model: ModelGLM40520,
	}, callback)
}

func (lm *LLM) MultiAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
	return lm.Ask(messages, llm.ChatConfig{
		Model: ModelGLM4VPlus,
	}, callback)
}

func (lm *LLM) BestMultiAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
	return lm.Ask(messages, llm.ChatConfig{
		Model: ModelGLM4V,
	}, callback)
}

func (lm *LLM) CodeInterpreterAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
	return lm.Ask(messages, llm.ChatConfig{
		Model: ModelGLM4AllTools,
		Tools: map[string]any{llm.ToolCodeInterpreter: nil},
	}, callback)
}

func (lm *LLM) WebSearchAsk(messages []llm.ChatMessage, callback func(answer string)) (string, llm.Usage, error) {
	return lm.Ask(messages, llm.ChatConfig{
		Model: ModelGLM4AllTools,
		Tools: map[string]any{llm.ToolWebSearch: nil},
	}, callback)
}

func (lm *LLM) Ask(messages []llm.ChatMessage, config llm.ChatConfig, callback func(answer string)) (string, llm.Usage, error) {
	config.SetDefault(&lm.config.ChatConfig)
	c, err := zhipu.NewClient(zhipu.WithAPIKey(lm.config.ApiKey), zhipu.WithBaseURL(lm.config.Endpoint))
	if err != nil {
		return "", llm.Usage{}, err
	}

	cc := c.ChatCompletion(config.GetModel())
	for _, msg := range messages {
		var contents []zhipu.ChatCompletionMultiContent
		if msg.Contents != nil {
			contents = make([]zhipu.ChatCompletionMultiContent, len(msg.Contents))
			for j, inPart := range msg.Contents {
				part := zhipu.ChatCompletionMultiContent{}
				part.Type = NameMap[inPart.Type]
				switch inPart.Type {
				case llm.TypeText:
					part.Text = inPart.Content
				case llm.TypeImage:
					part.ImageURL = &zhipu.URLItem{URL: inPart.Content}
					//case llm.TypeVideo:
					//	part.VideoURL = &zhipu.URLItem{URL: inPart.Content}
				}
				contents[j] = part
			}
		}
		if len(contents) == 1 && contents[0].Type == llm.TypeText {
			cc.AddMessage(zhipu.ChatCompletionMessage{
				Role:    NameMap[msg.Role],
				Content: contents[0].Text,
			})
		} else {
			cc.AddMessage(zhipu.ChatCompletionMultiMessage{
				Role:    NameMap[msg.Role],
				Content: contents,
			})
		}
	}

	for name := range config.GetTools() {
		switch name {
		case llm.ToolCodeInterpreter:
			cc.AddTool(zhipu.ChatCompletionToolCodeInterpreter{})
		case llm.ToolWebSearch:
			cc.AddTool(zhipu.ChatCompletionToolWebBrowser{})
		}
	}

	if config.GetMaxTokens() != 0 {
		cc.SetMaxTokens(config.GetMaxTokens())
	}
	if config.GetTemperature() != 0 {
		cc.SetTemperature(config.GetTemperature())
	}
	if config.GetTopP() != 0 {
		cc.SetTopP(config.GetTopP())
	}
	if callback != nil {
		cc.SetStreamHandler(func(r2 zhipu.ChatCompletionResponse) error {
			if r2.Choices != nil {
				for _, ch := range r2.Choices {
					text := ch.Delta.Content
					callback(text)
				}
			}
			return nil
		})
	}

	if lm.config.Debug {
		fmt.Println(cc.BatchMethod(), cc.BatchURL())
		fmt.Println(u.JsonP(cc.BatchBody()))
	}

	t1 := time.Now().UnixMilli()
	if r, err := cc.Do(context.Background()); err == nil {
		t2 := time.Now().UnixMilli() - t1
		results := make([]string, 0)
		if r.Choices != nil {
			for _, ch := range r.Choices {
				results = append(results, ch.Message.Content)
			}
		}
		return strings.Join(results, ""), llm.Usage{
			AskTokens:    r.Usage.PromptTokens,
			AnswerTokens: r.Usage.CompletionTokens,
			TotalTokens:  r.Usage.TotalTokens,
			UsedTime:     t2,
		}, nil
	} else {
		return "", llm.Usage{}, err
	}
}

func (lm *LLM) FastEmbedding(text string) ([]byte, llm.Usage, error) {
	return lm.Embedding(text, ModelEmbedding3)
}

func (lm *LLM) BestEmbedding(text string) ([]byte, llm.Usage, error) {
	return lm.Embedding(text, ModelEmbedding3)
}

func (lm *LLM) Embedding(text, model string) ([]byte, llm.Usage, error) {
	c, err := zhipu.NewClient(zhipu.WithAPIKey(lm.config.ApiKey), zhipu.WithBaseURL(lm.config.Endpoint))
	if err != nil {
		return nil, llm.Usage{}, err
	}

	cc := c.Embedding(model)
	cc.SetInput(text)
	t1 := time.Now().UnixMilli()
	if r, err := cc.Do(context.Background()); err == nil {
		t2 := time.Now().UnixMilli() - t1
		buf := new(bytes.Buffer)
		if r.Data != nil {
			for _, ch := range r.Data {
				for _, v := range ch.Embedding {
					_ = binary.Write(buf, binary.LittleEndian, float32(v))
				}
			}
		}
		return buf.Bytes(), llm.Usage{
			AskTokens:    r.Usage.PromptTokens,
			AnswerTokens: r.Usage.CompletionTokens,
			TotalTokens:  r.Usage.TotalTokens,
			UsedTime:     t2,
		}, nil
	} else {
		return nil, llm.Usage{}, err
	}
}
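Both providers pack embeddings as raw little-endian float32 bytes (the openai file writes its float32 values directly; here each float64 component is down-converted first). Reading such a buffer back is the mirror image — a sketch with an invented helper name:

package zhipu

import (
	"bytes"
	"encoding/binary"
)

// decodeEmbedding (hypothetical) reverses the packing used by Embedding:
// every 4 bytes is one little-endian float32 vector component.
func decodeEmbedding(b []byte) ([]float32, error) {
	out := make([]float32, len(b)/4)
	err := binary.Read(bytes.NewReader(b), binary.LittleEndian, out)
	return out, err
}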

@@ -1,60 +0,0 @@
package zhipu

import (
	"apigo.cc/ai/llm/llm"
	"github.com/yankeguo/zhipu"
)

type LLM struct {
	config llm.Config
}

var NameMap = map[string]string{
	llm.TypeText:  zhipu.MultiContentTypeText,
	llm.TypeImage: zhipu.MultiContentTypeImageURL,
	//llm.TypeVideo:     zhipu.MultiContentTypeVideoURL,
	llm.RoleSystem:    zhipu.RoleSystem,
	llm.RoleUser:      zhipu.RoleUser,
	llm.RoleAssistant: zhipu.RoleAssistant,
	llm.RoleTool:      zhipu.RoleTool,
}

const (
	ModelGLM4Plus     = "GLM-4-Plus"
	ModelGLM40520     = "GLM-4-0520"
	ModelGLM4Long     = "GLM-4-Long"
	ModelGLM4AirX     = "GLM-4-AirX"
	ModelGLM4Air      = "GLM-4-Air"
	ModelGLM4Flash    = "GLM-4-Flash"
	ModelGLM4AllTools = "GLM-4-AllTools"
	ModelGLM4         = "GLM-4"
	ModelGLM4VPlus    = "GLM-4V-Plus"
	ModelGLM4V        = "GLM-4V"
	ModelCogVideoX    = "CogVideoX"
	ModelCogView3Plus = "CogView-3-Plus"
	ModelCogView3     = "CogView-3"
	ModelEmbedding3   = "Embedding-3"
	ModelEmbedding2   = "Embedding-2"
	ModelCharGLM3     = "CharGLM-3"
	ModelEmohaa       = "Emohaa"
	ModelCodeGeeX4    = "CodeGeeX-4"
)

func (lm *LLM) Support() llm.Support {
	return llm.Support{
		Ask:                    true,
		AskWithImage:           true,
		AskWithVideo:           true,
		AskWithCodeInterpreter: true,
		AskWithWebSearch:       true,
		MakeImage:              true,
		MakeVideo:              true,
		Models:                 []string{ModelGLM4Plus, ModelGLM40520, ModelGLM4Long, ModelGLM4AirX, ModelGLM4Air, ModelGLM4Flash, ModelGLM4AllTools, ModelGLM4, ModelGLM4VPlus, ModelGLM4V, ModelCogVideoX, ModelCogView3Plus, ModelCogView3, ModelEmbedding3, ModelEmbedding2, ModelCharGLM3, ModelEmohaa, ModelCodeGeeX4},
	}
}

func init() {
	llm.Register("zhipu", func(config llm.Config) llm.LLM {
		return &LLM{config: config}
	})
}

zhipu/gc.go (94 deleted lines)

@@ -1,94 +0,0 @@
package zhipu

import (
	"apigo.cc/ai/llm/llm"
	"context"
	"errors"
	"github.com/yankeguo/zhipu"
	"time"
)

func (lm *LLM) FastMakeImage(prompt string, config llm.GCConfig) ([]string, llm.Usage, error) {
	config.Model = ModelCogView3Plus
	return lm.MakeImage(prompt, config)
}

func (lm *LLM) BestMakeImage(prompt string, config llm.GCConfig) ([]string, llm.Usage, error) {
	config.Model = ModelCogView3
	return lm.MakeImage(prompt, config)
}

func (lm *LLM) MakeImage(prompt string, config llm.GCConfig) ([]string, llm.Usage, error) {
	c, err := zhipu.NewClient(zhipu.WithAPIKey(lm.config.ApiKey), zhipu.WithBaseURL(lm.config.Endpoint))
	if err != nil {
		return nil, llm.Usage{}, err
	}

	config.SetDefault(&lm.config.GCConfig)
	cc := c.ImageGeneration(config.Model).SetPrompt(prompt)
	//cc.SetSize(config.GetSize())

	t1 := time.Now().UnixMilli()
	if r, err := cc.Do(context.Background()); err == nil {
		t2 := time.Now().UnixMilli() - t1
		results := make([]string, 0)
		for _, item := range r.Data {
			results = append(results, item.URL)
		}
		return results, llm.Usage{
			UsedTime: t2,
		}, nil
	} else {
		return nil, llm.Usage{}, err
	}
}

func (lm *LLM) FastMakeVideo(prompt string, config llm.GCConfig) ([]string, []string, llm.Usage, error) {
	config.Model = ModelCogVideoX
	return lm.MakeVideo(prompt, config)
}

func (lm *LLM) BestMakeVideo(prompt string, config llm.GCConfig) ([]string, []string, llm.Usage, error) {
	config.Model = ModelCogVideoX
	return lm.MakeVideo(prompt, config)
}

func (lm *LLM) MakeVideo(prompt string, config llm.GCConfig) ([]string, []string, llm.Usage, error) {
	c, err := zhipu.NewClient(zhipu.WithAPIKey(lm.config.ApiKey), zhipu.WithBaseURL(lm.config.Endpoint))
	if err != nil {
		return nil, nil, llm.Usage{}, err
	}

	config.SetDefault(&lm.config.GCConfig)
	cc := c.VideoGeneration(config.Model).SetPrompt(prompt)
	cc.SetImageURL(config.GetRef())

	t1 := time.Now().UnixMilli()
	if resp, err := cc.Do(context.Background()); err == nil {
		t2 := time.Now().UnixMilli() - t1
		for i := 0; i < 1200; i++ {
			r, err := c.AsyncResult(resp.ID).Do(context.Background())
			if err != nil {
				return nil, nil, llm.Usage{}, err
			}
			if r.TaskStatus == zhipu.VideoGenerationTaskStatusSuccess {
				covers := make([]string, 0)
				results := make([]string, 0)
				for _, item := range r.VideoResult {
					results = append(results, item.URL)
					covers = append(covers, item.CoverImageURL)
				}
				return results, covers, llm.Usage{
					UsedTime: t2,
				}, nil
			}
			if r.TaskStatus == zhipu.VideoGenerationTaskStatusFail {
				return nil, nil, llm.Usage{}, errors.New("fail on task " + resp.ID)
			}
			time.Sleep(3 * time.Second)
		}
		return nil, nil, llm.Usage{}, errors.New("timeout on task " + resp.ID)
	} else {
		return nil, nil, llm.Usage{}, err
	}
}
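The MakeVideo loop above probes AsyncResult every 3 seconds for up to 1200 tries (an hour) before reporting a timeout. The same pattern as a standalone helper, for comparison (names invented):

package zhipu

import (
	"errors"
	"time"
)

// pollUntil (hypothetical) captures the loop above: retry a status probe
// at a fixed interval until it reports done, fails, or runs out of tries.
func pollUntil(probe func() (done bool, err error), interval time.Duration, maxTries int) error {
	for i := 0; i < maxTries; i++ {
		done, err := probe()
		if err != nil || done {
			return err
		}
		time.Sleep(interval)
	}
	return errors.New("timeout")
}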