Update/kafka implementations #3753

Merged
12 changes: 8 additions & 4 deletions src/workflows/airqo_etl_utils/airqo_utils.py
@@ -1040,6 +1040,7 @@ def get_devices(group_id: str) -> pd.DataFrame:
"""
from airqo_etl_utils.message_broker_utils import MessageBrokerUtils
from confluent_kafka import KafkaException
import json

broker = MessageBrokerUtils()
devices_list: list = []
@@ -1048,12 +1049,15 @@ def get_devices(group_id: str) -> pd.DataFrame:
topic="devices-topic",
group_id=group_id,
auto_offset_reset="earliest",
from_beginning=True,
auto_commit=False,
):
try:
key = message.key()
value = message.value()

key = message.get("key", None)
try:
value = json.loads(message.get("value", None))
except json.JSONDecodeError as e:
logger.exception(f"Error decoding JSON: {e}")
continue
if not key or not value.get("device_id"):
logger.info(
f"Skipping message with key: {key}, missing 'device_id'."
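Taken together, the `get_devices` change suggests that `consume_from_topic` now yields plain dicts with `key` and `value` entries (the value still being a raw JSON string) rather than `confluent_kafka` `Message` objects, so the caller decodes the payload itself. A minimal consolidated sketch of the resulting pattern, assuming that message shape; the broader `TypeError` guard for a missing value is an addition here, not part of the diff:

```python
import json
import logging

from airqo_etl_utils.message_broker_utils import MessageBrokerUtils

logger = logging.getLogger(__name__)


def collect_devices(group_id: str) -> list:
    """Collect device payloads from the devices topic, mirroring the updated loop."""
    broker = MessageBrokerUtils()
    devices: list = []
    for message in broker.consume_from_topic(
        topic="devices-topic",
        group_id=group_id,
        auto_offset_reset="earliest",  # replaces the removed from_beginning flag
        auto_commit=False,
    ):
        key = message.get("key", None)
        try:
            # TypeError is also caught in case "value" is absent; the diff
            # itself only handles JSONDecodeError.
            value = json.loads(message.get("value", None))
        except (json.JSONDecodeError, TypeError) as e:
            logger.exception(f"Error decoding JSON: {e}")
            continue
        if not key or not value.get("device_id"):
            logger.info(f"Skipping message with key: {key}, missing 'device_id'.")
            continue
        devices.append(value)
    return devices
```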
14 changes: 1 addition & 13 deletions src/workflows/airqo_etl_utils/message_broker_utils.py
@@ -36,7 +36,6 @@ def __init__(self):
self.partition_loads = {int(p): 0 for p in self.__partitions}
self.config = {
"bootstrap.servers": self.__bootstrap_servers,
"request.timeout.ms": 300000,
"metadata.max.age.ms": 60000,
}

@@ -153,6 +152,7 @@ def publish_to_topic(
"linger.ms": 100,
"message.timeout.ms": 300000,
"message.max.bytes": 2 * 1024 * 1024,
"request.timeout.ms": 300000,
}
)
producer = Producer(producer_config)
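With `request.timeout.ms` dropped from the shared config in `__init__` and re-added here, the setting now applies only to the producer path. A hedged sketch of the effective producer configuration after this change, using placeholder bootstrap servers, topic, and payload rather than the project's real values:

```python
from confluent_kafka import Producer

# Shared settings (see __init__ above, minus the removed request.timeout.ms);
# the bootstrap address is a placeholder.
base_config = {
    "bootstrap.servers": "localhost:9092",
    "metadata.max.age.ms": 60000,
}

# Producer-only settings, matching the updated publish_to_topic block.
producer_config = dict(base_config)
producer_config.update(
    {
        "linger.ms": 100,
        "message.timeout.ms": 300000,
        "message.max.bytes": 2 * 1024 * 1024,
        "request.timeout.ms": 300000,  # now set only on the producer side
    }
)

producer = Producer(producer_config)
producer.produce("devices-topic", key="device-1", value='{"device_id": "device-1"}')
producer.flush()
```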
@@ -204,7 +204,6 @@ def consume_from_topic(
auto_offset_reset: str = "latest",
max_messages: Optional[int] = None,
auto_commit: bool = True,
from_beginning: bool = False,
offset: Optional[int] = None,
wait_time_sec: int = 30,
streaming: bool = False,
@@ -218,7 +217,6 @@
auto_offset_reset: Determines where to start reading when there's no valid offset. Default is 'latest'.
max_messages: Limit on the number of messages to consume. If None, consume all available messages.
auto_commit: Whether to auto-commit offsets.
from_beginning: Whether to start consuming from the beginning of the topic.
offset: Start consuming from a specific offset if provided.
wait_time_sec: How long to wait for messages (useful for one-time data requests).
streaming: If True, run as a continuous streaming job.
@@ -248,16 +246,6 @@
assigned = True
wait_time_sec -= 1

if from_beginning:
logger.info("Seeking to the beginning of all partitions...")
partitions = [
TopicPartition(topic, p.partition, offset=0)
for p in consumer.assignment()
]
consumer.assign(partitions)
for partition in partitions:
consumer.seek(partition)

if offset is not None:
logger.info(f"Seeking to offset {offset} for all partitions...")
partitions = [
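With `from_beginning` and its seek-to-zero block removed, a full replay is expressed either through `auto_offset_reset="earliest"` (as the `get_devices` change above does) or through the retained `offset` parameter. The standalone `confluent_kafka` sketch below shows the equivalent start-at-an-explicit-offset behaviour; broker address, topic, and group id are placeholders, and this is illustrative rather than the helper's exact implementation:

```python
from confluent_kafka import Consumer, TopicPartition

consumer = Consumer(
    {
        "bootstrap.servers": "localhost:9092",
        "group.id": "devices-backfill",
        "auto.offset.reset": "earliest",
        "enable.auto.commit": False,
    }
)
consumer.subscribe(["devices-topic"])

# Wait until the group coordinator hands out partitions, then pin every
# assigned partition to the requested offset (0 reproduces the old
# from_beginning behaviour).
target_offset = 0
while not consumer.assignment():
    consumer.poll(1.0)

partitions = [
    TopicPartition("devices-topic", p.partition, target_offset)
    for p in consumer.assignment()
]
consumer.assign(partitions)

msg = consumer.poll(10.0)
if msg is not None and msg.error() is None:
    print(msg.key(), msg.value())
consumer.close()
```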