package packages

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	kafkaBroker "github.com/segmentio/kafka-go"
)

type (
	// Ikafka describes the Kafka operations exposed by this package.
	Ikafka interface {
		Publisher(key string, topic string, body interface{}) error
		Consumer(topic, groupId string, handler func(message kafkaBroker.Message) error) (*kafkaBroker.Reader, error)
		ResetOffset(topic, groupId string) error
	}

	kafka struct {
		ctx     context.Context
		brokers []string
	}
)

// NewKafka returns a Kafka helper bound to the given context and broker list.
func NewKafka(ctx context.Context, brokers []string) Ikafka {
	return &kafka{ctx: ctx, brokers: brokers}
}
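
// A minimal usage sketch (the broker address, topic, key, and payload below
// are assumptions for illustration, not part of this package):
//
//	k := NewKafka(context.Background(), []string{"localhost:9092"})
//	if err := k.Publisher("order-123", "orders.created", map[string]string{"id": "123"}); err != nil {
//		// handle err
//	}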

// Publisher marshals body to JSON and writes it to topic, using key for
// partition assignment.
func (h *kafka) Publisher(key string, topic string, body interface{}) error {
	broker := kafkaBroker.Writer{
		Addr:                   kafkaBroker.TCP(h.brokers...),
		Topic:                  topic,
		Compression:            kafkaBroker.Snappy,
		RequiredAcks:           kafkaBroker.RequireAll,
		AllowAutoTopicCreation: true,
		BatchBytes:             1e+9,
		BatchSize:              50,
		MaxAttempts:            10,
		ErrorLogger: kafkaBroker.LoggerFunc(func(s string, i ...interface{}) {
			Logrus("error", fmt.Sprintf(s, i...))
		}),
	}
	// Close the writer so buffered messages are flushed before returning.
	defer broker.Close()

	bodyByte, err := json.Marshal(body)
	if err != nil {
		return err
	}

	msg := kafkaBroker.Message{Key: []byte(key), Value: bodyByte}
	return broker.WriteMessages(h.ctx, msg)
}

// Consumer joins groupId on topic, fetches one message, passes it to handler,
// and commits the offset only after the handler succeeds (at-least-once
// delivery). The reader is returned so the caller can keep consuming from it;
// the caller is responsible for closing it.
func (h *kafka) Consumer(topic, groupId string, handler func(message kafkaBroker.Message) error) (*kafkaBroker.Reader, error) {
	reader := kafkaBroker.NewReader(kafkaBroker.ReaderConfig{
		Brokers:                h.brokers,
		Topic:                  topic,
		GroupID:                groupId,
		WatchPartitionChanges:  true,
		OffsetOutOfRangeError:  true,
		MaxBytes:               1e+9,
		MaxAttempts:            10,
		PartitionWatchInterval: 3 * time.Second,
		JoinGroupBackoff:       3 * time.Second,
		ErrorLogger: kafkaBroker.LoggerFunc(func(s string, i ...interface{}) {
			Logrus("error", fmt.Sprintf(s, i...))
		}),
	})

	// Fetch-then-commit: if the handler fails, the offset is not committed,
	// so the message will be redelivered on the next fetch.
	message, err := reader.FetchMessage(h.ctx)
	if err != nil {
		reader.Close()
		return nil, err
	}
	if err := handler(message); err != nil {
		reader.Close()
		return nil, err
	}
	if err := reader.CommitMessages(h.ctx, message); err != nil {
		reader.Close()
		return nil, err
	}
	return reader, nil
}
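
// A minimal consumption sketch (topic, group id, and handler are assumptions
// for illustration); the returned reader stays open, so close it when done:
//
//	reader, err := k.Consumer("orders.created", "billing-service", func(m kafkaBroker.Message) error {
//		Logrus("info", string(m.Value))
//		return nil
//	})
//	if err != nil {
//		return err
//	}
//	defer reader.Close()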

// ResetOffset moves groupId's committed offsets on topic to the current end
// of each partition, so the group skips any backlog and resumes from newly
// produced messages.
func (h *kafka) ResetOffset(topic, groupId string) error {
	broker, err := kafkaBroker.DialContext(h.ctx, "tcp", h.brokers[0])
	if err != nil {
		return err
	}
	defer broker.Close()

	partitions, err := broker.ReadPartitions(topic)
	if err != nil {
		return err
	}

	// Ask each partition's leader for the offset at the current time, i.e.
	// the latest offset. Close every connection inside the loop rather than
	// deferring, so connections do not pile up across iterations.
	offsets := make(map[int]int64, len(partitions))
	for _, partition := range partitions {
		host := fmt.Sprintf("%s:%d", partition.Leader.Host, partition.Leader.Port)
		connection, err := kafkaBroker.DialLeader(h.ctx, "tcp", host, topic, partition.ID)
		if err != nil {
			return err
		}
		offset, err := connection.ReadOffset(time.Now())
		connection.Close()
		if err != nil {
			return err
		}
		offsets[partition.ID] = offset
	}

	// Join the consumer group and commit the collected offsets on its behalf.
	group, err := kafkaBroker.NewConsumerGroup(kafkaBroker.ConsumerGroupConfig{
		Brokers: h.brokers,
		Topics:  []string{topic},
		ID:      groupId,
	})
	if err != nil {
		return err
	}
	defer group.Close()

	groupNext, err := group.Next(h.ctx)
	if err != nil {
		return err
	}
	return groupNext.CommitOffsets(map[string]map[int]int64{topic: offsets})
}
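
// A minimal reset sketch (topic and group id are assumptions): skip a group's
// backlog so it resumes from newly produced messages only.
//
//	if err := k.ResetOffset("orders.created", "billing-service"); err != nil {
//		// handle err
//	}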