package kafka
import (
	"context"
	"errors"
	"strconv"
	"strings"
	"sync"
	"time"

	kafka "github.com/segmentio/kafka-go"

	"producer/internal/logging"
	"producer/pkg/utils"
)
// KafkaClient manages the connection with Kafka, caching one writer per topic.
// The zero value is not usable; construct with NewKafkaClient.
type KafkaClient struct {
	brokers []string                 // broker addresses, split from the bootstrap string
	mu      sync.Mutex               // guards writers (contains a Mutex: use *KafkaClient, never copy)
	writers map[string]*kafka.Writer // lazily-created writer per topic, see getWriter
}
// NewKafkaClient creates a new Kafka client from a comma-separated list of
// bootstrap servers (e.g. "host1:9092,host2:9092"). Writers are created
// lazily, one per topic, on first publish.
func NewKafkaClient(bootstrapServers string) *KafkaClient {
	kc := &KafkaClient{
		writers: map[string]*kafka.Writer{},
	}
	kc.brokers = strings.Split(bootstrapServers, ",")
	return kc
}
// getWriter returns the cached writer for topic, creating and caching a new
// one on first use. Safe for concurrent use: the writers map is guarded by mu.
func (kc *KafkaClient) getWriter(topic string) *kafka.Writer {
	kc.mu.Lock()
	defer kc.mu.Unlock()

	if w, ok := kc.writers[topic]; ok {
		return w
	}

	// KAFKA_REQUIRED_ACKS: "all" (the default) maps to kafka.RequireAll; any
	// numeric value is used verbatim; an unparsable value falls back to
	// RequireAll. Default first, then override — the original assigned
	// RequireAll in two separate branches of a nested if/else.
	requiredAcks := int(kafka.RequireAll)
	if acks := utils.GetEnv("KAFKA_REQUIRED_ACKS", "all"); acks != "all" {
		if n, err := strconv.Atoi(acks); err == nil {
			requiredAcks = n
		}
	}

	w := kafka.NewWriter(kafka.WriterConfig{
		Brokers:      kc.brokers,
		Topic:        topic,
		Balancer:     &kafka.LeastBytes{},
		RequiredAcks: requiredAcks,
		Async:        utils.GetEnvBool("KAFKA_ASYNC", false),
	})
	kc.writers[topic] = w
	return w
}
// Publish publishes a single keyed message to the given topic, stamped with
// the current time. Write semantics (sync vs. async, acks) are those of the
// topic's writer as configured in getWriter.
func (kc *KafkaClient) Publish(ctx context.Context, topic, key string, value []byte) error {
	return kc.getWriter(topic).WriteMessages(ctx, kafka.Message{
		Key:   []byte(key),
		Value: value,
		Time:  time.Now(),
	})
}
// Close closes every cached writer, logging each failure, and returns the
// accumulated close errors (nil when all writers closed cleanly). The
// original returned only the last error, silently dropping earlier failures.
// The writer cache is also reset so the client does not keep handles to
// already-closed writers.
func (kc *KafkaClient) Close() error {
	kc.mu.Lock()
	defer kc.mu.Unlock()

	var errs []error
	for _, w := range kc.writers {
		if e := w.Close(); e != nil {
			logging.ErrorLogger.Printf("Error cerrando writer: %v", e)
			errs = append(errs, e)
		}
	}
	// Drop closed writers; a later getWriter will create fresh ones.
	kc.writers = make(map[string]*kafka.Writer)
	return errors.Join(errs...)
}
// NOTE(review): everything below is a commented-out earlier revision of this
// file that duplicates the code above; it should be deleted — version control
// already preserves the history.
//package kafka
//
//import (
// "context"
// "producer/pkg/utils"
// "strconv"
// "strings"
// "sync"
// "time"
//
// kafka "github.com/segmentio/kafka-go"
//)
//
//// KafkaClient gestiona la conexión con Kafka
//type KafkaClient struct {
// brokers []string
// mu sync.Mutex
// writers map[string]*kafka.Writer
//}
//
//// NewKafkaClient crea un nuevo cliente Kafka
//func NewKafkaClient(bootstrapServers string) *KafkaClient {
// return &KafkaClient{
// brokers: strings.Split(bootstrapServers, ","),
// writers: make(map[string]*kafka.Writer),
// }
//}
//
//// getWriter obtiene o crea un writer para el topic dado
//func (kc *KafkaClient) getWriter(topic string) *kafka.Writer {
// kc.mu.Lock()
// defer kc.mu.Unlock()
//
// if w, ok := kc.writers[topic]; ok {
// return w
// }
//
// // Determinar RequiredAcks basado en variable de entorno
// requiredAcks := kafka.RequireAll
// if acks := utils.GetEnv("KAFKA_REQUIRED_ACKS", "all"); acks != "all" {
// if acksNum, err := strconv.Atoi(acks); err == nil {
// requiredAcks = kafka.RequiredAcks(acksNum)
// }
// }
//
// w := kafka.NewWriter(kafka.WriterConfig{
// Brokers: kc.brokers,
// Topic: topic,
// Balancer: &kafka.LeastBytes{},
// RequiredAcks: requiredAcks,
// Async: utils.GetEnvBool("KAFKA_ASYNC", false),
// })
// kc.writers[topic] = w
// return w
//}
//
//// Publish publica un mensaje en cualquier topic
//func (kc *KafkaClient) Publish(ctx context.Context, topic, key string, value []byte) error {
// writer := kc.getWriter(topic)
// msg := kafka.Message{
// Key: []byte(key),
// Value: value,
// Time: time.Now(),
// }
// return writer.WriteMessages(ctx, msg)
//}
//
//// Close cierra todos los writers
//func (kc *KafkaClient) Close() error {
// kc.mu.Lock()
// defer kc.mu.Unlock()
//
// var err error
// for topic, w := range kc.writers {
// if e := w.Close(); e != nil {
// err = e
// }
// }
// return err
//}