Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

I351 list results #358

Merged
merged 9 commits into from
Apr 17, 2019
73 changes: 71 additions & 2 deletions src/smif/cli/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,12 +100,71 @@


def list_model_runs(args):
    """List the model runs defined in the config, optionally indicating whether complete
    results exist.

    Parameters
    ----------
    args
        Parsed command-line arguments. Reads ``args.complete`` to toggle the
        completeness check, plus whatever store-location options ``_get_store``
        consumes.
    """
    store = _get_store(args)
    model_run_configs = store.read_model_runs()

    if args.complete:
        print('Model runs with an asterisk (*) have complete results available\n')

    for run in model_run_configs:
        # Note: print each run name exactly once per iteration (a stray
        # duplicate print of run['name'] has been removed).
        run_name = run['name']

        if args.complete:
            # A run is considered complete when every expected canonical
            # result tuple is available, ignoring decision iterations.
            expected_results = store.canonical_expected_results(run_name)
            available_results = store.canonical_available_results(run_name)

            complete = ' *' if expected_results == available_results else ''

            print('{}{}'.format(run_name, complete))
        else:
            print(run_name)


def list_available_results(args):
    """Print a tree of the results currently available for one model run.

    Output is grouped by sector model, then output, then decision iteration,
    with the completed timesteps listed against each decision.
    """
    store = _get_store(args)
    expected = store.canonical_expected_results(args.model_run)
    available = store.available_results(args.model_run)

    # Header: the model run and its sos model
    run = store.read_model_run(args.model_run)
    print('\nmodel run: {}'.format(args.model_run))
    print('{}- sos model: {}'.format(' ' * 2, run['sos_model']))

    # Unique sector models expected to produce results
    for sec_model in sorted({sec for _t, _d, sec, _out in expected}):
        print('{}- sector model: {}'.format(' ' * 4, sec_model))

        # Unique outputs expected from this sector model
        for output in sorted({out for _t, _d, sec, out in expected
                              if sec == sec_model}):
            print('{}- output: {}'.format(' ' * 6, output))

            # Decision iterations with any results for this model/output pair
            decisions = sorted({d for _t, d, sec, out in available
                                if sec == sec_model and out == output})

            if not decisions:
                print('{}- no results'.format(' ' * 8))

            for decision in decisions:
                base_str = '{}- decision {}:'.format(' ' * 8, decision)

                # Timesteps completed for this decision/model/output
                steps = sorted({t for t, d, sec, out in available
                                if d == decision and sec == sec_model
                                and out == output})
                assert len(steps) > 0, \
                    "If a decision is available, so is at least one time step"

                print('{} {}'.format(base_str, ', '.join(str(t) for t in steps)))


def run_model_runs(args):
Expand Down Expand Up @@ -244,6 +303,16 @@ def parse_arguments():
parser_list = subparsers.add_parser(
'list', help='List available model runs', parents=[parent_parser])
parser_list.set_defaults(func=list_model_runs)
parser_list.add_argument('-c', '--complete',
help="Show which model runs have complete results",
action='store_true')

# RESULTS
parser_results = subparsers.add_parser(
'available_results', help='List available results', parents=[parent_parser])
parser_results.set_defaults(func=list_available_results)
parser_results.add_argument('model_run',
help="Name of the model run to list available results")

# APP
parser_app = subparsers.add_parser(
Expand Down
78 changes: 77 additions & 1 deletion src/smif/data_layer/store.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,13 @@
When unable to read data e.g. unable to handle file type or connect
to database
"""
import itertools
from copy import deepcopy
from logging import getLogger
from operator import itemgetter
from typing import Dict, List, Optional

import numpy as np # type: ignore

from smif.data_layer import DataArray
from smif.data_layer.abstract_data_store import DataStore
from smif.data_layer.abstract_metadata_store import MetadataStore
Expand Down Expand Up @@ -871,6 +871,82 @@ def prepare_warm_start(self, model_run_name):
else:
max_timestep = None
return max_timestep

def canonical_available_results(self, model_run_name):
    """List the results that are available from a model run, collapsing all decision
    iterations.

    This is the unique items from calling `available_results`, with all decision
    iterations set to 0.

    This method is used to determine whether a model run is complete, given that it is
    impossible to know how many decision iterations to expect: we simply check that each
    expected timestep has been completed.

    Parameters
    ----------
    model_run_name : str

    Returns
    -------
    set
        Set of (timestep, 0, sec_model_name, output_name) tuples representing
        available results
    """
    # Zero out the decision-iteration field; the set comprehension both
    # deduplicates and replaces the previous append-loop-then-set idiom.
    return {
        (timestep, 0, sec_model_name, output_name)
        for timestep, _decision, sec_model_name, output_name
        in self.available_results(model_run_name)
    }

def canonical_expected_results(self, model_run_name):
    """List the results that are expected from a model run, collapsing all decision
    iterations.

    For a complete model run, this would coincide with the unique list returned from
    `available_results`, where all decision iterations are set to 0.

    This method is used to determine whether a model run is complete, given that it is
    impossible to know how many decision iterations to expect: we simply check that each
    expected timestep has been completed.

    Parameters
    ----------
    model_run_name : str

    Returns
    -------
    set
        Set of (timestep, 0, sec_model_name, output_name) tuples representing
        expected results
    """
    # Each result is identified by a tuple
    # (timestep, decision_iteration, sec_model_name, output_name);
    # the decision iteration is fixed at 0 in the canonical form.

    # Resolve the run's timesteps and its sos model configuration
    model_run = self.read_model_run(model_run_name)
    timesteps = sorted(model_run['timesteps'])
    sos_config = self.read_sos_model(model_run['sos_model'])

    # Every (output, timestep) pair of every sector model is expected once
    expected = set()
    for sec_model_name in sos_config['sector_models']:
        outputs = self.read_model(sec_model_name)['outputs']
        expected.update(
            (timestep, 0, sec_model_name, output['name'])
            for output, timestep in itertools.product(outputs, timesteps)
        )

    return expected
# endregion

# region data store utilities
Expand Down
48 changes: 46 additions & 2 deletions tests/cli/test_cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,8 @@
from tempfile import TemporaryDirectory
from unittest.mock import call, patch

from pytest import fixture

import smif
from pytest import fixture
from smif.cli import confirm, parse_arguments, setup_project_folder


Expand Down Expand Up @@ -110,6 +109,51 @@ def test_fixture_list_runs(tmp_sample_project):
assert "energy_water_cp_cr" in str(output.stdout)
assert "energy_central" in str(output.stdout)

# Run energy_central and re-check output with optional flag for completed results
subprocess.run(["smif", "run", "energy_central", "-d", config_dir], stdout=subprocess.PIPE)
output = subprocess.run(["smif", "list", "-c", "-d", config_dir], stdout=subprocess.PIPE)
assert "energy_central *" in str(output.stdout)


def test_fixture_available_results(tmp_sample_project):
    """Test running the filesystem-based single_run fixture
    """
    config_dir = tmp_sample_project

    def _available_results_output():
        # Invoke the CLI and capture stdout as a string for counting
        completed = subprocess.run(
            ["smif", "available_results", "energy_central", "-d", config_dir],
            stdout=subprocess.PIPE)
        return str(completed.stdout)

    # Before running, all outputs should report "no results"
    out_str = _available_results_output()
    for fragment, count in [
            ('model run: energy_central', 1),
            ('sos model: energy', 1),
            ('sector model:', 1),
            ('output:', 2),
            ('output: cost', 1),
            ('output: water_demand', 1),
            ('no results', 2),
            ('decision', 0)]:
        assert out_str.count(fragment) == count

    # Run energy_central and re-check output with optional flag for completed results
    subprocess.run(["smif", "run", "energy_central", "-d", config_dir],
                   stdout=subprocess.PIPE)

    out_str = _available_results_output()
    for fragment, count in [
            ('model run: energy_central', 1),
            ('sos model: energy', 1),
            ('sector model:', 1),
            ('output:', 2),
            ('output: cost', 1),
            ('output: water_demand', 1),
            ('no results', 0),
            ('decision', 8),
            ('decision 1', 2),
            ('decision 2', 2),
            ('decision 3', 2),
            ('decision 4', 2),
            (': 2010', 4),
            (': 2015', 2),
            (': 2020', 2)]:
        assert out_str.count(fragment) == count


def test_setup_project_folder():
"""Test contents of the setup project folder
Expand Down
36 changes: 36 additions & 0 deletions tests/data_layer/test_store.py
Original file line number Diff line number Diff line change
Expand Up @@ -314,3 +314,39 @@ def test_warm_start(self, store, sample_results):
timestep = 2020
store.write_results(sample_results, 'test_model_run', 'model_name', timestep)
assert store.prepare_warm_start('test_model_run') == timestep

def test_canonical_available_results(self, store, sample_results):
    # Write results across two decision iterations; 2020 exists only in
    # iteration 1, so the canonical form must still include it
    for timestep, decision in [(2010, 0), (2015, 0), (2010, 1), (2015, 1),
                               (2020, 1)]:
        store.write_results(sample_results, 'model_run_name', 'model_name',
                            timestep, decision)

    output_name = sample_results.spec.name

    # Canonical results collapse every decision iteration to 0 and deduplicate
    correct_results = {
        (timestep, 0, 'model_name', output_name)
        for timestep in (2010, 2015, 2020)
    }

    assert store.canonical_available_results('model_run_name') == correct_results

def test_canonical_expected_results(
    self, store, sample_dimensions, get_sos_model, get_sector_model,
    energy_supply_sector_model, model_run
):
    # Populate the store with the configuration the expectation derives from
    for dim in sample_dimensions:
        store.write_dimension(dim)
    store.write_sos_model(get_sos_model)
    store.write_model_run(model_run)
    store.write_model(get_sector_model)
    store.write_model(energy_supply_sector_model)

    # One canonical tuple per expected timestep, decision iteration fixed at 0
    correct_results = {
        (year, 0, 'energy_demand', 'gas_demand')
        for year in (2015, 2020, 2025)
    }

    assert store.canonical_expected_results(model_run['name']) == correct_results