[not for merge] start a monitoring radio plugin API #3315

Draft: wants to merge 4 commits into base: master
Changes from all commits
1 change: 0 additions & 1 deletion docs/userguide/monitoring.rst
@@ -42,7 +42,6 @@ configuration. Here the `parsl.monitoring.MonitoringHub` is specified to use por
],
monitoring=MonitoringHub(
hub_address=address_by_hostname(),
hub_port=55055,
monitoring_debug=False,
resource_monitoring_interval=10,
),
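For illustration, a minimal sketch of what the user-guide monitoring configuration looks like after this change. Only the `MonitoringHub` arguments are taken from the diff above; the surrounding `Config` and executor boilerplate is an assumption standing in for the elided part of the documented example. The point is simply that the pinned `hub_port=55055` is dropped.

```python
from parsl.addresses import address_by_hostname
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.monitoring import MonitoringHub

# Sketch of the user-guide example with the pinned hub_port removed.
# The executor below is assumed, not part of the diff.
config = Config(
    executors=[
        HighThroughputExecutor(label="htex_local"),
    ],
    monitoring=MonitoringHub(
        hub_address=address_by_hostname(),
        monitoring_debug=False,
        resource_monitoring_interval=10,
    ),
)
```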
1 change: 0 additions & 1 deletion parsl/configs/ASPIRE1.py
@@ -34,7 +34,6 @@
],
monitoring=MonitoringHub(
hub_address=address_by_interface('ib0'),
hub_port=55055,
resource_monitoring_interval=10,
),
strategy='simple',
18 changes: 14 additions & 4 deletions parsl/dataflow/dflow.py
@@ -111,8 +111,6 @@ def __init__(self, config: Config) -> None:
self.monitoring = config.monitoring

if self.monitoring:
if self.monitoring.logdir is None:
self.monitoring.logdir = self.run_dir
self.monitoring.start(self.run_dir, self.config.run_dir)

self.time_began = datetime.datetime.now()
@@ -746,11 +744,10 @@ def launch_task(self, task_record: TaskRecord) -> Future:
kwargs=kwargs,
x_try_id=try_id,
x_task_id=task_id,
monitoring_hub_url=self.monitoring.monitoring_hub_url,
radio_config=executor.remote_monitoring_radio_config,
run_id=self.run_id,
logging_level=wrapper_logging_level,
sleep_dur=self.monitoring.resource_monitoring_interval,
radio_mode=executor.radio_mode,
monitor_resources=executor.monitor_resources(),
run_dir=self.run_dir)

@@ -1148,6 +1145,19 @@ def add_executors(self, executors: Sequence[ParslExecutor]) -> None:
executor.hub_address = self.monitoring.hub_address
executor.hub_zmq_port = self.monitoring.hub_zmq_port
executor.submit_monitoring_radio = self.monitoring.radio
# this will modify the radio config object: it will add relevant parameters needed
# for the particular remote radio sender to communicate back
logger.info("starting monitoring receiver "
f"for executor {executor} "
f"with remote monitoring radio config {executor.remote_monitoring_radio_config}")
executor.monitoring_receiver = self.monitoring.start_receiver(executor.remote_monitoring_radio_config,
ip=self.monitoring.hub_address,
run_dir=self.run_dir)
# TODO: this is a weird place to start the receiver - it would fit more
# naturally in executor.start - but there's a tangle here: the executors
# should stay usable in a non-pure-Parsl context where there is no DFK
# (and no monitoring) to pull configuration from.
if hasattr(executor, 'provider'):
if hasattr(executor.provider, 'script_dir'):
executor.provider.script_dir = os.path.join(self.run_dir, 'submit_scripts')
42 changes: 34 additions & 8 deletions parsl/executors/base.py
@@ -1,11 +1,19 @@
import logging
import os
from abc import ABCMeta, abstractmethod
from concurrent.futures import Future
from typing import Any, Callable, Dict, Optional

from typing_extensions import Literal, Self

from parsl.monitoring.radios import MonitoringRadioSender
from parsl.monitoring.radios.base import (
MonitoringRadioReceiver,
MonitoringRadioSender,
RadioConfig,
)
from parsl.monitoring.radios.udp import UDPRadio

logger = logging.getLogger(__name__)


class ParslExecutor(metaclass=ABCMeta):
@@ -19,15 +27,13 @@ class ParslExecutor(metaclass=ABCMeta):
no arguments and re-raises any thrown exception.

In addition to the listed methods, a ParslExecutor instance must always
have a member field:
have these member fields:

label: str - a human readable label for the executor, unique
with respect to other executors.

Per-executor monitoring behaviour can be influenced by exposing:

radio_mode: str - a string describing which radio mode should be used to
send task resource data back to the submit side.
remote_monitoring_radio_config: RadioConfig describing how tasks on this executor
should report task resource status

An executor may optionally expose:

Expand All @@ -45,11 +51,16 @@ class ParslExecutor(metaclass=ABCMeta):
"""

label: str = "undefined"
radio_mode: str = "udp"

def __init__(
self,
*,

# TODO: I'd like these two to go away, but right now they're needed to
# configure the interchange monitoring radio - in addition to the submit
# and worker monitoring radios (!). They are effectively a third
# monitoring radio config, so what should that look like for the
# interchange?
hub_address: Optional[str] = None,
hub_zmq_port: Optional[int] = None,
submit_monitoring_radio: Optional[MonitoringRadioSender] = None,
@@ -58,10 +69,19 @@ def __init__(
):
self.hub_address = hub_address
self.hub_zmq_port = hub_zmq_port

# These are the parameters for the monitoring radio used on the remote side
# (e.g. in workers) to send results back; they should end up encapsulated
# inside a RadioConfig.
self.submit_monitoring_radio = submit_monitoring_radio
self.remote_monitoring_radio_config: RadioConfig = UDPRadio()

self.run_dir = os.path.abspath(run_dir)
self.run_id = run_id

# will be set externally later, which is pretty ugly
self.monitoring_receiver: Optional[MonitoringRadioReceiver] = None

def __enter__(self) -> Self:
return self

@@ -94,7 +114,13 @@ def shutdown(self) -> None:

This includes all attached resources such as workers and controllers.
"""
pass
logger.debug("Starting base executor shutdown")
# logger.error(f"BENC: monitoring receiver on {self} is {self.monitoring_receiver}")
if self.monitoring_receiver is not None:
logger.debug("Starting monitoring receiver shutdown")
self.monitoring_receiver.shutdown()
logger.debug("Done with monitoring receiver shutdown")
logger.debug("Done with base executor shutdown")

def monitor_resources(self) -> bool:
"""Should resource monitoring happen for tasks on running on this executor?
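The docstring and constructor changes above replace the old `radio_mode` string with a `RadioConfig` object, which is the plugin point this draft is exploring. As a rough sketch of what a third-party radio plugin could look like, assuming a `RadioConfig` subclass supplies a worker-side sender factory and a submit-side receiver factory; the method names below are illustrative assumptions, not the finalized interface of this draft:

```python
from parsl.monitoring.radios.base import RadioConfig


class FileSystemRadio(RadioConfig):
    """Hypothetical plugin sketch: workers drop monitoring messages into a
    shared directory, and the submit side polls that directory.

    The method names create_sender / create_receiver are assumptions made
    for illustration - this draft has not fixed the RadioConfig interface.
    """

    def __init__(self, *, subdir: str = "monitoring-msgs") -> None:
        # Parameters a user sets up front; the DFK/receiver is expected to
        # fill in anything else the remote sender needs before tasks launch.
        self.subdir = subdir

    def create_sender(self):
        # Would run remotely (e.g. in a worker) and return an object with a
        # send(message) method.
        raise NotImplementedError("illustrative sketch only")

    def create_receiver(self, *, ip: str, run_dir: str):
        # Would run on the submit side, started per executor by the DFK
        # (compare the monitoring.start_receiver call in the dflow.py hunk).
        raise NotImplementedError("illustrative sketch only")
```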
16 changes: 14 additions & 2 deletions parsl/executors/high_throughput/executor.py
@@ -29,6 +29,8 @@
)
from parsl.executors.status_handling import BlockProviderExecutor
from parsl.jobs.states import TERMINAL_STATES, JobState, JobStatus
from parsl.monitoring.radios.base import RadioConfig
from parsl.monitoring.radios.htex import HTEXRadio
from parsl.process_loggers import wrap_with_logs
from parsl.providers import LocalProvider
from parsl.providers.base import ExecutionProvider
@@ -253,11 +255,13 @@ def __init__(self,
worker_logdir_root: Optional[str] = None,
manager_selector: ManagerSelector = RandomManagerSelector(),
block_error_handler: Union[bool, Callable[[BlockProviderExecutor, Dict[str, JobStatus]], None]] = True,
encrypted: bool = False):
encrypted: bool = False,
remote_monitoring_radio_config: Optional[RadioConfig] = None):

logger.debug("Initializing HighThroughputExecutor")

BlockProviderExecutor.__init__(self, provider=provider, block_error_handler=block_error_handler)

self.label = label
self.worker_debug = worker_debug
self.storage_access = storage_access
@@ -300,6 +304,12 @@ def __init__(self,
self._workers_per_node = 1 # our best guess-- we do not have any provider hints

self._task_counter = 0

if remote_monitoring_radio_config is not None:
self.remote_monitoring_radio_config = remote_monitoring_radio_config
else:
self.remote_monitoring_radio_config = HTEXRadio()

self.worker_ports = worker_ports
self.worker_port_range = worker_port_range
self.interchange_proc: Optional[subprocess.Popen] = None
@@ -322,7 +332,6 @@ def __init__(self,
interchange_launch_cmd = DEFAULT_INTERCHANGE_LAUNCH_CMD
self.interchange_launch_cmd = interchange_launch_cmd

radio_mode = "htex"
enable_mpi_mode: bool = False
mpi_launcher: str = "mpiexec"

@@ -832,6 +841,9 @@ def shutdown(self, timeout: float = 10.0):
logger.info("Closing command client")
self.command_client.close()

# TODO: implement this across all executors
super().shutdown()

logger.info("Finished HighThroughputExecutor shutdown attempt")

def get_usage_information(self):
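From a user's point of view, the new keyword argument makes the remote monitoring radio selectable per executor. A small sketch using only names that appear in this diff; `HTEXRadio` remains the default when the argument is omitted, per the `__init__` change above, and the no-argument `UDPRadio()` form is taken from `parsl/executors/base.py` rather than confirmed independently:

```python
from parsl.executors import HighThroughputExecutor
from parsl.monitoring.radios.udp import UDPRadio

# Default: remote_monitoring_radio_config is None, so the executor falls
# back to HTEXRadio(), as shown in the __init__ diff above.
htex_default = HighThroughputExecutor(label="htex_default")

# Explicitly selecting a different remote radio for this executor.
htex_udp = HighThroughputExecutor(
    label="htex_udp",
    remote_monitoring_radio_config=UDPRadio(),
)
```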
3 changes: 2 additions & 1 deletion parsl/executors/high_throughput/interchange.py
@@ -19,7 +19,8 @@
from parsl.executors.high_throughput.manager_record import ManagerRecord
from parsl.executors.high_throughput.manager_selector import ManagerSelector
from parsl.monitoring.message_type import MessageType
from parsl.monitoring.radios import MonitoringRadioSender, ZMQRadioSender
from parsl.monitoring.radios.base import MonitoringRadioSender
from parsl.monitoring.radios.zmq import ZMQRadioSender
from parsl.process_loggers import wrap_with_logs
from parsl.serialize import serialize as serialize_object
from parsl.utils import setproctitle
14 changes: 12 additions & 2 deletions parsl/executors/high_throughput/mpi_executor.py
@@ -15,6 +15,7 @@
from parsl.executors.status_handling import BlockProviderExecutor
from parsl.jobs.states import JobStatus
from parsl.launchers import SimpleLauncher
from parsl.monitoring.radios.base import RadioConfig
from parsl.providers import LocalProvider
from parsl.providers.base import ExecutionProvider

@@ -66,7 +67,8 @@ def __init__(self,
worker_logdir_root: Optional[str] = None,
mpi_launcher: str = "mpiexec",
block_error_handler: Union[bool, Callable[[BlockProviderExecutor, Dict[str, JobStatus]], None]] = True,
encrypted: bool = False):
encrypted: bool = False,
remote_monitoring_radio_config: Optional[RadioConfig] = None):
super().__init__(
# Hard-coded settings
cores_per_worker=1e-9, # Ensures there will be at least an absurd number of workers
@@ -92,7 +94,15 @@ def __init__(self,
address_probe_timeout=address_probe_timeout,
worker_logdir_root=worker_logdir_root,
block_error_handler=block_error_handler,
encrypted=encrypted
encrypted=encrypted,

# TODO: worker-side monitoring in MPI-style code is probably going to be
# broken - resource monitoring most likely won't see any worker
# processes - so perhaps this executor should have worker resource
# monitoring disabled, as the thread pool executor does (for related
# but different reasons)?
remote_monitoring_radio_config=remote_monitoring_radio_config
)
self.enable_mpi_mode = True
self.mpi_launcher = mpi_launcher
2 changes: 2 additions & 0 deletions parsl/executors/taskvine/executor.py
@@ -605,6 +605,8 @@ def shutdown(self, *args, **kwargs):
self._finished_task_queue.close()
self._finished_task_queue.join_thread()

super().shutdown()

logger.debug("TaskVine shutdown completed")

@wrap_with_logs
1 change: 1 addition & 0 deletions parsl/executors/threads.py
@@ -73,6 +73,7 @@ def shutdown(self, block=True):
"""
logger.debug("Shutting down executor, which involves waiting for running tasks to complete")
self.executor.shutdown(wait=block)
super().shutdown()
logger.debug("Done with executor shutdown")

def monitor_resources(self):
2 changes: 2 additions & 0 deletions parsl/executors/workqueue/executor.py
@@ -716,6 +716,8 @@ def shutdown(self, *args, **kwargs):
self.collector_queue.close()
self.collector_queue.join_thread()

super().shutdown()

logger.debug("Work Queue shutdown completed")

@wrap_with_logs
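The `super().shutdown()` additions in the TaskVine, thread pool, and Work Queue executors (and, per the TODO, in HighThroughputExecutor) follow one pattern: the concrete executor tears down its own machinery first, then delegates to `ParslExecutor.shutdown()` so that the per-executor monitoring receiver started by the DataFlowKernel is also stopped. A sketch of that pattern for a hypothetical custom executor, with the other abstract methods omitted:

```python
import logging

from parsl.executors.base import ParslExecutor

logger = logging.getLogger(__name__)


class MyExecutor(ParslExecutor):
    """Hypothetical executor showing the shutdown-chaining pattern.

    start(), submit(), and the other abstract methods are omitted here.
    """

    def shutdown(self) -> None:
        logger.debug("Shutting down MyExecutor-specific resources")
        # ... stop workers, close queues, join threads ...

        # Delegate to the base class so that self.monitoring_receiver
        # (set by the DataFlowKernel in add_executors) is shut down too.
        super().shutdown()
```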
12 changes: 6 additions & 6 deletions parsl/monitoring/db_manager.py
@@ -279,20 +279,20 @@ class Resource(Base):
class DatabaseManager:
def __init__(self,
db_url: str = 'sqlite:///runinfo/monitoring.db',
logdir: str = '.',
run_dir: str = '.',
logging_level: int = logging.INFO,
batching_interval: float = 1,
batching_threshold: float = 99999,
):

self.workflow_end = False
self.workflow_start_message: Optional[MonitoringMessage] = None
self.logdir = logdir
os.makedirs(self.logdir, exist_ok=True)
self.run_dir = run_dir
os.makedirs(self.run_dir, exist_ok=True)

logger.propagate = False

set_file_logger("{}/database_manager.log".format(self.logdir), level=logging_level,
set_file_logger("{}/database_manager.log".format(self.run_dir), level=logging_level,
format_string="%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] [%(threadName)s %(thread)d] %(message)s",
name="database_manager")

@@ -681,7 +681,7 @@ def close(self) -> None:
def dbm_starter(exception_q: mpq.Queue,
resource_msgs: mpq.Queue,
db_url: str,
logdir: str,
run_dir: str,
logging_level: int) -> None:
"""Start the database manager process

@@ -692,7 +692,7 @@ def dbm_starter(exception_q: mpq.Queue,

try:
dbm = DatabaseManager(db_url=db_url,
logdir=logdir,
run_dir=run_dir,
logging_level=logging_level)
logger.info("Starting dbm in dbm starter")
dbm.start(resource_msgs)
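The `logdir` to `run_dir` rename is mechanical: the database manager's log is now keyed off the run directory it is handed, rather than a separate `logdir` setting that the DFK used to default to the run directory anyway. A minimal construction sketch with placeholder paths (in a real run the DataFlowKernel supplies these values):

```python
import logging

from parsl.monitoring.db_manager import DatabaseManager

# Placeholder paths for illustration only.
dbm = DatabaseManager(
    db_url="sqlite:///runinfo/000/monitoring.db",
    run_dir="runinfo/000",  # previously the `logdir` parameter
    logging_level=logging.INFO,
)
# database_manager.log is written under runinfo/000/
```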