♻️ Use raw API to access ollama

LittleSheep 2025-01-30 14:03:11 +08:00
parent 0f8dd2a709
commit 57709487f8
3 changed files with 40 additions and 23 deletions

go.mod

@@ -18,6 +18,7 @@ require (
	github.com/spf13/viper v1.19.0
	github.com/tmc/langchaingo v0.1.12
	google.golang.org/grpc v1.70.0
	google.golang.org/protobuf v1.36.4
	gorm.io/driver/postgres v1.5.9
	gorm.io/gorm v1.25.12
)
@@ -89,7 +90,6 @@ require (
	golang.org/x/sys v0.29.0 // indirect
	golang.org/x/text v0.21.0 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287 // indirect
	google.golang.org/protobuf v1.36.4 // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	gorm.io/datatypes v1.2.4 // indirect


@@ -1,15 +1,15 @@
package services
import (
	"context"
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"
	"github.com/rs/zerolog/log"
	"github.com/spf13/viper"
	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/llms/ollama"
	"github.com/tmc/langchaingo/prompts"
)
@@ -26,16 +26,18 @@ func PingOllama() error {
	return nil
}
var LargeModel *ollama.LLM
func ConnectOllama() error {
	model := viper.GetString("ollama.model")
	llm, err := ollama.New(ollama.WithModel(model), ollama.WithServerURL(viper.GetString("ollama.url")))
	if err != nil {
		return err
	}
	LargeModel = llm
	return nil
type OllamaResponse struct {
	Model string `json:"model"`
	CreatedAt time.Time `json:"created_at"`
	Response string `json:"response"`
	Done bool `json:"done"`
	Context []int64 `json:"context"`
	TotalDuration int64 `json:"total_duration"`
	LoadDuration int64 `json:"load_duration"`
	PromptEvalCount int64 `json:"prompt_eval_count"`
	PromptEvalDuration int64 `json:"prompt_eval_duration"`
	EvalCount int64 `json:"eval_count"`
	EvalDuration int64 `json:"eval_duration"`
}
func GenerateInsights(source string) (string, error) {
@@ -50,13 +52,33 @@ func GenerateInsights(source string) (string, error) {
		return "", fmt.Errorf("failed to format prompt: %v", err)
	}
	raw, _ := json.Marshal(map[string]any{
		"model": viper.GetString("ollama.model"),
		"prompt": inPrompt,
		"stream": false,
	})
	start := time.Now()
	completion, err := LargeModel.Call(context.Background(), inPrompt,
		llms.WithTemperature(0.8),
	)
	url := viper.GetString("ollama.url") + "/api/generate"
	resp, err := http.Post(url, "application/json", bytes.NewBuffer(raw))
	if err != nil {
		return "", fmt.Errorf("failed to generate insights: %v", err)
	}
	outRaw, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("failed to read response body: %v", err)
	}
	var response OllamaResponse
	err = json.Unmarshal(outRaw, &response)
	if err != nil {
		return "", fmt.Errorf("failed to unmarshal response: %v", err)
	}
	took := time.Since(start)
	log.Info().Dur("took", took).Msg("Insight generated successfully...")
	return completion, err
	return response.Response, err
}
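
Assembled from the hunks above, the new raw-HTTP path looks roughly like the self-contained sketch below. The prompt formatting via langchaingo/prompts that precedes it in GenerateInsights is elided, the name generateRaw is illustrative only, and the checked json.Marshal error plus the deferred Body.Close are additions in the sketch, not part of the commit.

package services

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/rs/zerolog/log"
	"github.com/spf13/viper"
)

// generateRaw posts a non-streaming generate request to Ollama and returns
// the response text. OllamaResponse is the struct declared in the hunk above.
func generateRaw(prompt string) (string, error) {
	raw, err := json.Marshal(map[string]any{
		"model":  viper.GetString("ollama.model"),
		"prompt": prompt,
		"stream": false, // ask Ollama for a single JSON object instead of a stream
	})
	if err != nil {
		return "", fmt.Errorf("failed to marshal request: %v", err)
	}

	start := time.Now()
	resp, err := http.Post(viper.GetString("ollama.url")+"/api/generate", "application/json", bytes.NewBuffer(raw))
	if err != nil {
		return "", fmt.Errorf("failed to generate insights: %v", err)
	}
	defer resp.Body.Close() // not in the diff; added so the connection is released

	outRaw, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("failed to read response body: %v", err)
	}

	var response OllamaResponse
	if err := json.Unmarshal(outRaw, &response); err != nil {
		return "", fmt.Errorf("failed to unmarshal response: %v", err)
	}

	log.Info().Dur("took", time.Since(start)).Msg("Insight generated successfully...")
	return response.Response, nil
}

With "stream": false, Ollama's /api/generate returns a single JSON object matching the OllamaResponse struct, which is why a plain io.ReadAll plus json.Unmarshal is enough here.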


@@ -67,11 +67,6 @@ func main() {
		log.Fatal().Err(err).Msg("An error occurred when running database auto migration.")
	}
	// Connect to ollama
	if err := services.ConnectOllama(); err != nil {
		log.Fatal().Err(err).Msg("An error occurred when connecting to ollama.")
	}
	// Initialize cache
	if err := cache.NewStore(); err != nil {
		log.Fatal().Err(err).Msg("An error occurred when initializing cache.")
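
With the raw API there is no client to construct at startup, so the ConnectOllama block above is removed; every request now builds its own HTTP call from the viper config. The diff does not show PingOllama's body, but an equivalent raw health check could hit Ollama's GET /api/tags endpoint. A minimal sketch, assuming the same "ollama.url" key and the hypothetical name pingOllamaRaw:

// Hypothetical raw health check; not the project's actual PingOllama implementation.
func pingOllamaRaw() error {
	resp, err := http.Get(viper.GetString("ollama.url") + "/api/tags")
	if err != nil {
		return fmt.Errorf("failed to reach ollama: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("ollama returned status %d", resp.StatusCode)
	}
	return nil
}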