diff --git a/python/lsst/rubintv/analysis/service/commands/db.py b/python/lsst/rubintv/analysis/service/commands/db.py
index dc92c68..27ebcd3 100644
--- a/python/lsst/rubintv/analysis/service/commands/db.py
+++ b/python/lsst/rubintv/analysis/service/commands/db.py
@@ -102,18 +102,13 @@ def build_contents(self, data_center: DataCenter) -> dict:
         data = database.query(self.columns, query)
 
         if not data:
-            # There is no column data to return
-            content: dict = {
-                "schema": self.database,
-                "columns": self.columns,
-                "data": [],
-            }
-        else:
-            content = {
-                "schema": self.database,
-                "columns": list(data.keys()),
-                "data": data,
-            }
+            # There is no data to return
+            data = []
+        content = {
+            "schema": self.database,
+            "columns": self.columns,
+            "data": data,
+        }
 
         return content
 
diff --git a/python/lsst/rubintv/analysis/service/database.py b/python/lsst/rubintv/analysis/service/database.py
index 653c722..488b15f 100644
--- a/python/lsst/rubintv/analysis/service/database.py
+++ b/python/lsst/rubintv/analysis/service/database.py
@@ -360,34 +360,25 @@ def query(
         """
         table_columns = set()
         table_names = set()
-        column_names = set()
         # get the sql alchemy model for each column
         for column in columns:
             table_name, column_name = column.split(".")
             table_names.add(table_name)
-            column_names.add(column_name)
             column_obj = self.get_column(column)
             # Label each column as 'table_name.column_name'
             table_columns.add(column_obj.label(f"{table_name}.{column_name}"))
 
-        # Add either the visit_id or exposure_id column
-        # (depending on the primary key of the table) if necessary.
-        # Since we currently only join exposure tables or visit tables together
-        # (in order to simplify the design)
-        # we only need to check which subset of tables the first table is in.
-        table_name = next(iter(table_names))
-        if table_name in _exposure_tables:
-            index_column_name = "exposure_id"
-        elif table_name in _visit_tables:
-            index_column_name = "visit_id"
+        # Add the data IDs (seq_num and day_obs) to the query.
+        def add_data_ids(table_name: str):
+            day_obs_column = self.get_column(f"{table_name}.day_obs")
+            seq_num_column = self.get_column(f"{table_name}.seq_num")
+            # Strip off the table name to make the data IDs uniform
+            table_columns.add(day_obs_column.label("day_obs"))
+            table_columns.add(seq_num_column.label("seq_num"))
+        if "visit1" in table_names:
+            add_data_ids("visit1")
         else:
-            raise ValueError(f"Could not find a primary key for table {table_name}")
-        if index_column_name not in column_names:
-            # Add the index column to the query
-            index_column = self.get_column(f"{table_name}.{index_column_name}")
-            if index_column is None:
-                raise ValueError(f"Could not find a primary key for table {table_name}")
-            table_columns.add(index_column.label(f"{table_name}.{index_column_name}"))
+            add_data_ids("exposure")
 
         # generate the query
         query_model = sqlalchemy.and_(*[col.isnot(None) for col in table_columns])
@@ -407,7 +398,12 @@
 
         # Build the query
         query_model = sqlalchemy.select(*table_columns).select_from(select_from).where(query_model)
-        print(f"Query: {query_model}")
+
+        # Temporary, for testing. TODO: remove this code block before merging
+        _log_level = logger.getEffectiveLevel()
+        logger.setLevel(logging.INFO)
+        logger.info(f"Query: {query_model}")
+        logger.setLevel(_log_level)
 
         # Fetch the data
         connection = self.engine.connect()
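
A note on the new `add_data_ids` helper: it leans on SQLAlchemy's `label()` to strip the table prefix from `day_obs` and `seq_num`, so callers see identical data-ID keys whether the query ran against `visit1` or `exposure`. Below is a minimal, runnable sketch of that labeling behavior; the miniature table and values are hypothetical stand-ins for the real tables, but the `label()` mechanics are exactly what the helper uses:

```python
import sqlalchemy

# Hypothetical miniature table standing in for the real exposure/visit1 tables.
metadata = sqlalchemy.MetaData()
exposure = sqlalchemy.Table(
    "exposure",
    metadata,
    sqlalchemy.Column("day_obs", sqlalchemy.Integer),
    sqlalchemy.Column("seq_num", sqlalchemy.Integer),
    sqlalchemy.Column("exp_time", sqlalchemy.Float),
)

engine = sqlalchemy.create_engine("sqlite://")
metadata.create_all(engine)

with engine.connect() as connection:
    connection.execute(
        exposure.insert(), [{"day_obs": 20240101, "seq_num": 5, "exp_time": 30.0}]
    )
    query = sqlalchemy.select(
        # Requested columns keep their 'table_name.column_name' labels...
        exposure.c.exp_time.label("exposure.exp_time"),
        # ...while the data IDs are labeled without the table prefix, so the
        # client sees the same keys for exposure and visit queries.
        exposure.c.day_obs.label("day_obs"),
        exposure.c.seq_num.label("seq_num"),
    )
    for row in connection.execute(query):
        print(dict(row._mapping))
        # {'exposure.exp_time': 30.0, 'day_obs': 20240101, 'seq_num': 5}
```

One side effect worth noting in review: because the labeled data-ID columns are added to `table_columns`, the `sqlalchemy.and_(*[col.isnot(None) for col in table_columns])` filter further down now also requires `day_obs` and `seq_num` to be non-NULL.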
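On the temporary logging block: `getEffectiveLevel()` returns the level inherited from an ancestor logger when the logger's own level is `NOTSET`, so restoring with that value pins the logger to an explicit level instead of returning it to `NOTSET`. If the block outlives testing, a small context manager makes the save/restore exact and exception-safe. This is a generic sketch, not code from this PR; the logger name is an assumption:

```python
import logging
from contextlib import contextmanager

logging.basicConfig()  # so the usage example below actually emits output

# Assumed module logger name, mirroring the module path.
logger = logging.getLogger("lsst.rubintv.analysis.service.database")


@contextmanager
def temporary_log_level(log: logging.Logger, level: int):
    """Temporarily set a logger's level and restore the exact prior value.

    Saving log.level (rather than getEffectiveLevel) means a NOTSET logger
    goes back to NOTSET, and the finally block restores it even if the
    logging call raises.
    """
    original = log.level
    log.setLevel(level)
    try:
        yield
    finally:
        log.setLevel(original)


# Usage mirroring the temporary block in the diff:
with temporary_log_level(logger, logging.INFO):
    logger.info("Query: SELECT ...")
```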