feat: migrate from msgpack to protobuf (handler-base v1.0.0)
Some checks failed
CI / Lint (push) Failing after 2m49s
CI / Test (push) Successful in 3m36s
CI / Notify (push) Has been cancelled
CI / Docker Build & Push (push) Has been cancelled
CI / Release (push) Has been cancelled

- Replace msgpack encoding with protobuf wire format
- Update field names to proto convention (UserId, RequestId, EnableRag, etc.)
- Use the standalone messages.EffectiveQuery() function
- Cast TopK to int32 for proto compatibility
- Rewrite tests for proto round-trips
This commit is contained in:
2026-02-21 15:30:04 -05:00
parent 87d0545d2c
commit e2176331c8
5 changed files with 62 additions and 63 deletions

32
main.go
View File

@@ -16,6 +16,7 @@ import (
"git.daviestechlabs.io/daviestechlabs/handler-base/handler"
"git.daviestechlabs.io/daviestechlabs/handler-base/messages"
"git.daviestechlabs.io/daviestechlabs/handler-base/natsutil"
"google.golang.org/protobuf/proto"
)
func main() {
@@ -45,23 +46,23 @@ func main() {
h := handler.New("ai.chat.user.*.message", cfg)
h.OnTypedMessage(func(ctx context.Context, msg *nats.Msg) (any, error) {
req, err := natsutil.Decode[messages.ChatRequest](msg.Data)
if err != nil {
h.OnTypedMessage(func(ctx context.Context, msg *nats.Msg) (proto.Message, error) {
var req messages.ChatRequest
if err := natsutil.Decode(msg.Data, &req); err != nil {
slog.Error("decode failed", "error", err)
return &messages.ErrorResponse{Error: true, Message: err.Error(), Type: "DecodeError"}, nil
}
query := req.EffectiveQuery()
requestID := req.RequestID
query := messages.EffectiveQuery(&req)
requestID := req.RequestId
if requestID == "" {
requestID = "unknown"
}
userID := req.UserID
userID := req.UserId
if userID == "" {
userID = "unknown"
}
enableRAG := req.EnableRAG
enableRAG := req.EnableRag
if !enableRAG && req.Premium {
enableRAG = true
}
@@ -71,13 +72,13 @@ func main() {
}
topK := req.TopK
if topK == 0 {
topK = ragTopK
topK = int32(ragTopK)
}
collection := req.Collection
if collection == "" {
collection = ragCollection
}
reqEnableTTS := req.EnableTTS || enableTTS
reqEnableTTS := req.EnableTts || enableTTS
systemPrompt := req.SystemPrompt
responseSubject := req.ResponseSubject
if responseSubject == "" {
@@ -159,18 +160,19 @@ func main() {
// 5. Generate LLM response (streaming when requested)
var responseText string
var err error
if req.EnableStreaming {
streamSubject := fmt.Sprintf("ai.chat.response.stream.%s", requestID)
responseText, err = llm.StreamGenerate(ctx, query, contextText, systemPrompt, func(token string) {
_ = h.NATS.Publish(streamSubject, &messages.ChatStreamChunk{
RequestID: requestID,
RequestId: requestID,
Type: "chunk",
Content: token,
Timestamp: messages.Timestamp(),
})
})
_ = h.NATS.Publish(streamSubject, &messages.ChatStreamChunk{
RequestID: requestID,
RequestId: requestID,
Type: "done",
Done: true,
Timestamp: messages.Timestamp(),
@@ -181,7 +183,7 @@ func main() {
if err != nil {
slog.Error("LLM generation failed", "error", err)
return &messages.ChatResponse{
UserID: userID,
UserId: userID,
Success: false,
Error: err.Error(),
}, nil
@@ -199,15 +201,15 @@ func main() {
}
result := &messages.ChatResponse{
UserID: userID,
UserId: userID,
Response: responseText,
ResponseText: responseText,
UsedRAG: usedRAG,
UsedRag: usedRAG,
Success: true,
Audio: audio,
}
if includeSources {
result.RAGSources = ragSources
result.RagSources = ragSources
}
// Publish to the response subject the frontend is waiting on