diff --git a/.github/workflows/doc.yaml b/.github/workflows/doc.yaml new file mode 100644 index 000000000..d96b6eaa3 --- /dev/null +++ b/.github/workflows/doc.yaml @@ -0,0 +1,87 @@ +name: DOC + +on: + push: + branches: + - master + - develop + - 'release/*' + + tags: + - '*' + + pull_request: + branches: + - master + - develop + - 'release/*' + +env: + MAIN_REPO: IN-CORE/pyincore + +jobs: + + # ---------------------------------------------------------------------- + # DOCKER BUILD + # ---------------------------------------------------------------------- + docker: + runs-on: ubuntu-latest + + steps: + # checkout source code + - uses: actions/checkout@v2 + + # calculate some variables that are used later + - name: version information + run: | + if [ "${{ github.event.release.target_commitish }}" != "" ]; then + BRANCH="${{ github.event.release.target_commitish }}" + elif [[ $GITHUB_REF =~ pull ]]; then + BRANCH="$(echo $GITHUB_REF | sed 's#refs/pull/\([0-9]*\)/merge#PR-\1#')" + else + BRANCH=${GITHUB_REF##*/} + fi + echo "GITHUB_BRANCH=${BRANCH}" >> $GITHUB_ENV + if [ "$BRANCH" == "master" ]; then + version=$(awk -F= '/^release/ { print $2}' docs/source/conf.py | sed "s/[ ']//g") + tags="latest" + oldversion="" + while [ "${oldversion}" != "${version}" ]; do + oldversion="${version}" + tags="${tags},${version}" + version=${version%.*} + done + echo "VERSION=${version}" >> $GITHUB_ENV + echo "TAGS=${tags}" >> $GITHUB_ENV + elif [ "$BRANCH" == "develop" ]; then + echo "VERSION=develop" >> $GITHUB_ENV + echo "TAGS=develop" >> $GITHUB_ENV + else + echo "VERSION=testing" >> $GITHUB_ENV + echo "TAGS=${BRANCH}" >> $GITHUB_ENV + fi + + # build image + - name: Build image + uses: elgohr/Publish-Docker-Github-Action@3.04 + with: + name: incore/doc/pyincore + no_push: true + + # this will publish to NCSA + - name: Publish to NCSA Hub + #if: github.event_name != 'pull_request' && github.repository == env.MAIN_REPO + if: github.repository == env.MAIN_REPO + uses: elgohr/Publish-Docker-Github-Action@3.04 + env: + BRANCH: ${{ env.GITHUB_BRANCH }} + VERSION: ${{ env.VERSION }} + BUILDNUMBER: ${{ github.run_number }} + GITSHA1: ${{ github.sha }} + with: + registry: hub.ncsa.illinois.edu + name: incore/doc/pyincore + username: ${{ secrets.HUB_USERNAME }} + password: ${{ secrets.HUB_PASSWORD }} + tags: "${{ env.TAGS }}" + buildargs: BRANCH,VERSION,BUILDNUMBER,GITSHA1 \ No newline at end of file diff --git a/.github/workflows/pytests.yml b/.github/workflows/pytests.yml new file mode 100644 index 000000000..a1711db0d --- /dev/null +++ b/.github/workflows/pytests.yml @@ -0,0 +1,48 @@ +name: pytests + +# Runs unit tests on: +# - any push to any branch +# - when a PR is opened/reopened - this is just for additional safety and covers the case of master -> develop PRs without new pushes + +# TODO: Use mamba instead of conda for installing packages. Improves on ~5mins it takes to install requirements. 
+# https://github.com/mamba-org/provision-with-micromamba + +on: + push: + + pull_request: + types: [opened, reopened] + +jobs: + unit_test: + runs-on: ubuntu-latest + steps: + - name: Checkout source code + uses: actions/checkout@v2 + + - name: Cache conda + uses: actions/cache@v2 + env: + # Increase this value to reset cache if environment.yml has not changed + CACHE_NUMBER: 0 + with: + path: ~/conda_pkgs_dir + key: + ${{ runner.os }}-conda-${{ env.CACHE_NUMBER }}-${{ + hashFiles('environment.yml') }} + + - name: Install miniconda + uses: conda-incubator/setup-miniconda@v2 + with: + miniconda-version: "latest" + mamba-version: "*" + use-mamba: true + python-version: 3.8 + activate-environment: base + environment-file: environment.yml + use-only-tar-bz2: true + + - name: Run tests with pytest + run: | + echo "${{secrets.PYTEST_USER_TOKEN}}" > tests/pyincore/.incorepw + $CONDA/bin/python -m pytest --ignore=tests/test_format.py --ignore=tests/pyincore/analyses diff --git a/CHANGELOG.md b/CHANGELOG.md index 4e7053b62..f7df4847b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,26 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). +## [1.1.0] - 2021-10-27 + +### Added +- Convert HUA and PD outputs to JSON [#9](https://github.com/IN-CORE/pyincore/issues/9) +- Convert population dislocation output to heatmap [#3](https://github.com/IN-CORE/pyincore/issues/3) +- Joplin empirical restoration analysis [#28](https://github.com/IN-CORE/pyincore/issues/28) +- GitHub action to run unit tests [#26](https://github.com/IN-CORE/pyincore/issues/26) +- GitHub action to build documentation [#23](https://github.com/IN-CORE/pyincore/issues/23) +- Conda recipe [#17](https://github.com/IN-CORE/pyincore/issues/17) + +### Changed +- Percent change in utils converting CGE output to JSON [#34](https://github.com/IN-CORE/pyincore/issues/34) +- Show API response messages that services return [#6](https://github.com/IN-CORE/pyincore/issues/6) +- Removed deprecated methods [#7](https://github.com/IN-CORE/pyincore/issues/7) + +### Fixed +- Pass dataset type as parameter to from_dataframe method [#8](https://github.com/IN-CORE/pyincore/issues/8) +- PEP8 styling issues [#20](https://github.com/IN-CORE/pyincore/issues/20) +- Corrections to residential building recovery [#25](https://github.com/IN-CORE/pyincore/issues/25) + ## [1.0.0] - 2021-08-31 ### Changed - Improve runtime efficiency of residential recovery analysis [INCORE1-1339](https://opensource.ncsa.illinois.edu/jira/browse/INCORE1-1339) diff --git a/Dockerfile.docs b/Dockerfile similarity index 100% rename from Dockerfile.docs rename to Dockerfile diff --git a/docs/source/conf.py b/docs/source/conf.py index 0e4adc1a9..ec04374f7 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -33,9 +33,9 @@ author = '' # The short X.Y version -version = '1.0' +version = '1.1' # The full version, including alpha/beta/rc tags -release = '1.0.0' +release = '1.1.0' # -- General configuration --------------------------------------------------- diff --git a/docs/source/modules.rst b/docs/source/modules.rst index de975a5a4..dc3f3e4b9 100644 --- a/docs/source/modules.rst +++ b/docs/source/modules.rst @@ -59,7 +59,7 @@ analyses/example :members: analyses/housingrecoverysequential -============================== +================================== .. 
autoclass:: housingrecoverysequential.housingrecoverysequential.HousingRecoverySequential :members: @@ -89,6 +89,13 @@ analyses/joplincge .. autofunction:: joplincge.outputfunctions.get_diff :members: +analyses/joplinempiricalrestoration +=================================== +.. autoclass:: joplinempiricalrestoration.joplinempiricalrestoration.JoplinEmpiricalRestoration + :members: +.. autoclass:: joplinempiricalrestoration.joplinempirrestor_util.JoplinEmpirRestorUtil + :members: + analyses/meandamage =================== .. autoclass:: meandamage.meandamage.MeanDamage @@ -126,7 +133,7 @@ analyses/populationdislocation :members: analyses/residentialbuildingrecovery -============================ +==================================== .. autoclass:: residentialbuildingrecovery.residentialbuildingrecovery.ResidentialBuildingRecovery :members: @@ -252,6 +259,11 @@ utils/analysisutil .. autoclass:: utils.analysisutil.AnalysisUtil :members: +utils/cgeoutputprocess +====================== +.. autoclass:: utils.cgeoutputprocess.CGEOutputProcess + :members: + utils/dataprocessutil ===================== .. autoclass:: utils.dataprocessutil.DataProcessUtil @@ -272,6 +284,11 @@ utils/geoutil .. autoclass:: utils.geoutil.GeoUtil :members: +utils/popdisloutputprocess.py +============================= +.. autoclass:: utils.popdisloutputprocess.PopDislOutputProcess + :members: + utils/networkutil ================= .. autoclass:: utils.networkutil.NetworkUtil diff --git a/environment.yml b/environment.yml new file mode 100644 index 000000000..1e69a59b7 --- /dev/null +++ b/environment.yml @@ -0,0 +1,27 @@ +name: base +channels: + - conda-forge + - defaults +dependencies: + - boto3 + - deprecated + - fiona>=1.8.4 + - geopandas>=0.6.1 + - ipopt>=3.11 + - jsonpickle>=1.1 + - networkx>=2.2 + - numpy>=1.16.6,<2.0a0 + - owslib>=0.17.1 + - pandas>=0.24.1 + - pycodestyle>=2.6.0 + - pyomo>=5.6 + - pyproj>=1.9.6 + - pytest>=3.9.0 + - python-jose>=3.0 + - pyyaml>=3.13 + - rasterio>=1.0.18 + - requests>=2.21.0 + - rtree>=0.8.3 + - scipy>=1.2.0 + - shapely>=1.6.4.post1 + - wntr>=0.1.6 diff --git a/pyincore/__init__.py b/pyincore/__init__.py index 7ca599fef..d4303cf68 100644 --- a/pyincore/__init__.py +++ b/pyincore/__init__.py @@ -20,15 +20,11 @@ from pyincore.restorationservice import RestorationService from pyincore.spaceservice import SpaceService from pyincore.utils.analysisutil import AnalysisUtil +from pyincore.utils.popdisloutputprocess import PopDislOutputProcess +from pyincore.utils.cgeoutputprocess import CGEOutputProcess from pyincore.dataset import Dataset, InventoryDataset, DamageRatioDataset from pyincore.models.fragilitycurveset import FragilityCurveSet -from pyincore.models.standardfragilitycurve import StandardFragilityCurve -from pyincore.models.periodbuildingfragilitycurve import PeriodBuildingFragilityCurve -from pyincore.models.periodstandardfragilitycurve import PeriodStandardFragilityCurve -from pyincore.models.customexpressionfragilitycurve import CustomExpressionFragilityCurve -from pyincore.models.conditionalstandardfragilitycurve import ConditionalStandardFragilityCurve -from pyincore.models.parametricfragilitycurve import ParametricFragilityCurve -from pyincore.models.fragilitycurverefactored import FragilityCurveRefactored +from pyincore.models.fragilitycurve import FragilityCurve from pyincore.models.mappingset import MappingSet from pyincore.models.mapping import Mapping from pyincore.networkdata import NetworkData diff --git a/pyincore/analyses/bridgedamage/bridgedamage.py 
b/pyincore/analyses/bridgedamage/bridgedamage.py index b9f08d045..65249803f 100644 --- a/pyincore/analyses/bridgedamage/bridgedamage.py +++ b/pyincore/analyses/bridgedamage/bridgedamage.py @@ -13,7 +13,7 @@ from pyincore import AnalysisUtil, GeoUtil from pyincore import BaseAnalysis, HazardService, FragilityService from pyincore.analyses.bridgedamage.bridgeutil import BridgeUtil -from pyincore.models.fragilitycurverefactored import FragilityCurveRefactored +from pyincore.models.fragilitycurve import FragilityCurve class BridgeDamage(BaseAnalysis): @@ -175,7 +175,7 @@ def bridge_damage_analysis_bulk_input(self, bridges, hazard_type, dmg_intervals = dict() selected_fragility_set = fragility_set[bridge["id"]] - if isinstance(selected_fragility_set.fragility_curves[0], FragilityCurveRefactored): + if isinstance(selected_fragility_set.fragility_curves[0], FragilityCurve): # Supports multiple demand types in same fragility hazard_val = AnalysisUtil.update_precision_of_lists(hazard_vals[i]["hazardValues"]) input_demand_types = hazard_vals[i]["demands"] @@ -190,9 +190,9 @@ def bridge_damage_analysis_bulk_input(self, bridges, hazard_type, if not AnalysisUtil.do_hazard_values_have_errors(hazard_vals[i]["hazardValues"]): bridge_args = selected_fragility_set.construct_expression_args_from_inventory(bridge) dmg_probability = \ - selected_fragility_set.calculate_limit_state_refactored_w_conversion(hval_dict, - inventory_type="bridge", - **bridge_args) + selected_fragility_set.calculate_limit_state(hval_dict, + inventory_type="bridge", + **bridge_args) dmg_intervals = selected_fragility_set.calculate_damage_interval(dmg_probability, hazard_type=hazard_type, inventory_type="bridge") diff --git a/pyincore/analyses/buildingdamage/buildingdamage.py b/pyincore/analyses/buildingdamage/buildingdamage.py index e129c96bb..36db97f7b 100755 --- a/pyincore/analyses/buildingdamage/buildingdamage.py +++ b/pyincore/analyses/buildingdamage/buildingdamage.py @@ -11,7 +11,7 @@ from pyincore import BaseAnalysis, HazardService, \ FragilityService, AnalysisUtil, GeoUtil from pyincore.analyses.buildingdamage.buildingutil import BuildingUtil -from pyincore.models.fragilitycurverefactored import FragilityCurveRefactored +from pyincore.models.fragilitycurve import FragilityCurve class BuildingDamage(BaseAnalysis): @@ -174,7 +174,7 @@ def building_damage_analysis_bulk_input(self, buildings, retrofit_strategy, haza building_period = selected_fragility_set.fragility_curves[0].get_building_period(num_stories) # TODO: Once all fragilities are migrated to new format, we can remove this condition - if isinstance(selected_fragility_set.fragility_curves[0], FragilityCurveRefactored): + if isinstance(selected_fragility_set.fragility_curves[0], FragilityCurve): # Supports multiple demand types in same fragility b_haz_vals = AnalysisUtil.update_precision_of_lists(hazard_vals[i]["hazardValues"]) b_demands = hazard_vals[i]["demands"] @@ -192,7 +192,7 @@ def building_damage_analysis_bulk_input(self, buildings, retrofit_strategy, haza if not AnalysisUtil.do_hazard_values_have_errors(hazard_vals[i]["hazardValues"]): building_args = selected_fragility_set.construct_expression_args_from_inventory(b) - dmg_probability = selected_fragility_set.calculate_limit_state_refactored_w_conversion( + dmg_probability = selected_fragility_set.calculate_limit_state( hval_dict, **building_args, period=building_period) dmg_interval = selected_fragility_set.calculate_damage_interval( dmg_probability, hazard_type=hazard_type, inventory_type="building") diff --git 
a/pyincore/analyses/cumulativebuildingdamage/cumulativebuildingdamage.py b/pyincore/analyses/cumulativebuildingdamage/cumulativebuildingdamage.py index a6a1102f0..5f4be6738 100644 --- a/pyincore/analyses/cumulativebuildingdamage/cumulativebuildingdamage.py +++ b/pyincore/analyses/cumulativebuildingdamage/cumulativebuildingdamage.py @@ -127,20 +127,21 @@ def cumulative_building_damage(self, eq_building_damage, limit_states = collections.OrderedDict() - limit_states["LS_0"] = eq_limit_states["LS_0"] + tsunami_limit_states["LS_0"] \ - - eq_limit_states["LS_0"] * tsunami_limit_states["LS_0"] - - limit_states["LS_1"] = eq_limit_states["LS_1"] + tsunami_limit_states["LS_1"] \ - - eq_limit_states["LS_1"] * tsunami_limit_states["LS_1"] \ - + ((eq_limit_states["LS_0"] - eq_limit_states["LS_1"]) * ( - tsunami_limit_states["LS_0"] - - tsunami_limit_states["LS_1"])) - - limit_states["LS_2"] = eq_limit_states["LS_2"] + tsunami_limit_states["LS_2"] \ - - eq_limit_states["LS_2"] * tsunami_limit_states["LS_2"] \ - + ((eq_limit_states["LS_1"] - eq_limit_states["LS_2"]) * ( - tsunami_limit_states["LS_1"] - - tsunami_limit_states["LS_2"])) + limit_states["LS_0"] = \ + eq_limit_states["LS_0"] + tsunami_limit_states["LS_0"] \ + - eq_limit_states["LS_0"] * tsunami_limit_states["LS_0"] + + limit_states["LS_1"] = \ + eq_limit_states["LS_1"] + tsunami_limit_states["LS_1"] \ + - eq_limit_states["LS_1"] * tsunami_limit_states["LS_1"] \ + + ((eq_limit_states["LS_0"] + - eq_limit_states["LS_1"]) * (tsunami_limit_states["LS_0"] - tsunami_limit_states["LS_1"])) + + limit_states["LS_2"] = \ + eq_limit_states["LS_2"] + tsunami_limit_states["LS_2"] \ + - eq_limit_states["LS_2"] * tsunami_limit_states["LS_2"] \ + + ((eq_limit_states["LS_1"] + - eq_limit_states["LS_2"]) * (tsunami_limit_states["LS_1"] - tsunami_limit_states["LS_2"])) damage_state = FragilityCurveSet._3ls_to_4ds(limit_states) diff --git a/pyincore/analyses/epfdamage/epfdamage.py b/pyincore/analyses/epfdamage/epfdamage.py index 1eb1a0c40..12ad9ef21 100644 --- a/pyincore/analyses/epfdamage/epfdamage.py +++ b/pyincore/analyses/epfdamage/epfdamage.py @@ -10,7 +10,7 @@ from pyincore import AnalysisUtil, GeoUtil from pyincore import BaseAnalysis, HazardService, FragilityService -from pyincore.models.fragilitycurverefactored import FragilityCurveRefactored +from pyincore.models.fragilitycurve import FragilityCurve class EpfDamage(BaseAnalysis): @@ -81,8 +81,8 @@ def run(self): repeat(liq_geology_dataset_id)) self.set_result_csv_data("result", ds_results, name=self.get_parameter("result_name")) - self.set_result_json_data("metadata", damage_results, name=self.get_parameter("result_name") + - "_additional_info") + self.set_result_json_data("metadata", damage_results, + name=self.get_parameter("result_name") + "_additional_info") return True @@ -172,7 +172,7 @@ def epf_damage_analysis_bulk_input(self, epfs, hazard_type, hazard_dataset_id, damage_result = dict() selected_fragility_set = fragility_set[epf["id"]] - if isinstance(selected_fragility_set.fragility_curves[0], FragilityCurveRefactored): + if isinstance(selected_fragility_set.fragility_curves[0], FragilityCurve): hazard_val = AnalysisUtil.update_precision_of_lists(hazard_vals[i]["hazardValues"]) input_demand_types = hazard_vals[i]["demands"] input_demand_units = hazard_vals[i]["units"] @@ -184,9 +184,9 @@ def epf_damage_analysis_bulk_input(self, epfs, hazard_type, hazard_dataset_id, j += 1 epf_args = selected_fragility_set.construct_expression_args_from_inventory(epf) - limit_states = 
selected_fragility_set.calculate_limit_state_refactored_w_conversion(hval_dict, - inventory_type='electric_facility', - **epf_args) + limit_states = selected_fragility_set.calculate_limit_state(hval_dict, + inventory_type='electric_facility', + **epf_args) else: raise ValueError("One of the fragilities is in deprecated format. This should not happen. If you are " "seeing this please report the issue.") @@ -256,7 +256,8 @@ def epf_damage_analysis_bulk_input(self, epfs, hazard_type, hazard_dataset_id, liquefaction_prob = liquefaction_vals[i]['liqProbability'] selected_liq_fragility = liq_fragility_set[liq_epf_id] - pgd_limit_states = selected_liq_fragility.calculate_limit_state(liq_hazard_val, std_dev=std_dev) + pgd_limit_states = \ + selected_liq_fragility.calculate_limit_state_w_conversion(liq_hazard_val, std_dev=std_dev) # match id and add liqhaztype, liqhazval, liqprobability field as well as rewrite limit # states and dmg_interval diff --git a/pyincore/analyses/joplinempiricalrestoration/__init__.py b/pyincore/analyses/joplinempiricalrestoration/__init__.py new file mode 100644 index 000000000..2d8f8d0e6 --- /dev/null +++ b/pyincore/analyses/joplinempiricalrestoration/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + + +from pyincore.analyses.joplinempiricalrestoration.joplinempiricalrestoration import JoplinEmpiricalRestoration +from pyincore.analyses.joplinempiricalrestoration.joplinempirrestor_util import JoplinEmpirRestorUtil \ No newline at end of file diff --git a/pyincore/analyses/joplinempiricalrestoration/joplinempiricalrestoration.py b/pyincore/analyses/joplinempiricalrestoration/joplinempiricalrestoration.py new file mode 100755 index 000000000..63a1884f9 --- /dev/null +++ b/pyincore/analyses/joplinempiricalrestoration/joplinempiricalrestoration.py @@ -0,0 +1,169 @@ +# Copyright (c) 2021 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + +import numpy as np +import pandas as pd + +from pyincore import BaseAnalysis +from pyincore.analyses.joplinempiricalrestoration.joplinempirrestor_util import JoplinEmpirRestorUtil + + +class JoplinEmpiricalRestoration(BaseAnalysis): + """ Joplin Empirical Restoration Model generates a random realization for the restoration time of + a building damaged in a tornado event to be restored to a certain functionality level. Functionality + levels in this model are defined according to Koliou and van de Lindt (2020) and range from Functionality + Level 4 (FL4, the lowest functionality) to Functionality Level 0 (FL0, full functionality). + + Args: + incore_client (IncoreClient): Service authentication. + + """ + + def __init__(self, incore_client): + super(JoplinEmpiricalRestoration, self).__init__(incore_client) + + def run(self): + """ Executes Joplin empirical restoration model analysis. + + Returns: + bool: True if successful, False otherwise. 
+ + """ + # Get seed + seed_i = self.get_parameter("seed") + target_fl = self.get_parameter("target_functionality_level") + + result_name = self.get_parameter("result_name") + + # Building dataset + building_set = self.get_input_dataset("buildings").get_dataframe_from_shapefile() + # Building damage dataset + building_dmg = self.get_input_dataset("building_dmg").get_dataframe_from_csv(low_memory=False) + + # merge and filter out archetypes > 5 + building_dmg_all = pd.merge(building_dmg, building_set, how="left", on="guid", copy=True, validate="1:1") + building_dmg_5 = building_dmg_all[["guid", "archetype", "LS_0", "LS_1", "LS_2", "haz_expose"]].copy() + building_func_5 = building_dmg_5[building_dmg_all["archetype"] <= 5] + + building_func = building_func_5[["guid", "LS_0", "LS_1", "LS_2", "haz_expose"]].copy() + building_func["targetFL"] = target_fl + + initial_func_level, restoration_days = self.get_restoration_days(seed_i, building_func) + building_func["initialFL"] = initial_func_level + building_func["restorDays"] = restoration_days + + building_func_fin = building_func[["guid", "initialFL", "targetFL", "restorDays"]] + csv_source = "dataframe" + self.set_result_csv_data("result", building_func_fin, result_name, csv_source) + + return True + + def get_restoration_days(self, seed_i, building_func): + """ Calculates restoration days. + + Args: + seed_i (int): Seed for random number generator to ensure replication if run as part + of a stochastic analysis, for example in connection with housing unit allocation analysis. + building_dmg (pd.DataFrame): Building damage dataset with guid, limit states, hazard exposure. + + Returns: + np.array: Initial functionality level based on damage state + np.array: Building restoration days. + + """ + fl_coef = JoplinEmpirRestorUtil.FL_COEF + + hazard_value = building_func[["haz_expose"]].to_numpy() != "no" + hazard_value = hazard_value.flatten() + + bdnp = building_func[["LS_0", "LS_1", "LS_2"]].to_numpy() + + # generate a random number between 0 and 1 and see where in boundaries it locates and use it to assign FL, + # for each building + rnd_num = np.random.uniform(0, 1, (len(building_func.index, ))) + bdnp_init = np.zeros(len(building_func.index, )).astype(int) # first, set all to 0 + bdnp_init = np.where(rnd_num < bdnp[:, 0], 1, bdnp_init) # if rnd_num < LS_0 set to 1 + bdnp_init = np.where(rnd_num < bdnp[:, 1], 2, bdnp_init) # if rnd_num < LS_0 set to 2 + bdnp_init = np.where(rnd_num < bdnp[:, 2], 3, bdnp_init) # if rnd_num < LS_0 set to 3 + + bdnp_target = building_func["targetFL"].to_numpy() + + means = fl_coef[bdnp_init, bdnp_target, 0] + sigmas = fl_coef[bdnp_init, bdnp_target, 1] + + np.random.seed(seed_i) + rest_days = np.random.lognormal(means, sigmas) + + # only when exposed to hazard, otherwise no damage and restoration = 0 + restoration_days = np.where(hazard_value, rest_days, 0).astype(int) + # F1-F4 notation + bdnp_init = bdnp_init + 1 + # F0 where not exposed to hazard + initial_func_level = np.where(hazard_value, bdnp_init, 0).astype(int) + + # Output of the model is restoration_days + return initial_func_level, restoration_days + + def get_spec(self): + """Get specifications of the Joplin empirical restoration analysis. + + Returns: + obj: A JSON object of specifications of the Joplin empirical restoration analysis. 
+ + """ + return { + "name": "joplin-empirical-restoration", + "description": "Values (in days) for the predicted restoration time of the building.", + "input_parameters": [ + { + "id": "result_name", + "required": True, + "description": "result dataset name", + "type": str + }, + { + "id": "target_functionality_level", + "required": False, + "description": "Target functionality level for all infrastructure", + "type": int + }, + { + "id": "seed", + "required": False, + "description": "Initial seed for the tornado hazard value", + "type": int + } + ], + "input_datasets": [ + { + 'id': 'buildings', + 'required': True, + 'description': 'Building Inventory', + 'type': ['ergo:buildingInventoryVer4', 'ergo:buildingInventoryVer5', + 'ergo:buildingInventoryVer6', 'ergo:buildingInventoryVer7'], + }, + { + "id": "building_dmg", + "required": True, + "description": "Building damage results CSV file", + "type": ["ergo:buildingDamageVer4", + "ergo:buildingDamageVer5", + "ergo:buildingDamageVer6", + "ergo:buildingInventory", + "ergo:nsBuildingInventoryDamage", + "ergo:nsBuildingInventoryDamageVer2"] + } + ], + "output_datasets": [ + { + "id": "result", + "parent_type": "buildings", + "description": "A dataset containing results (format: CSV) with values (in days) for the predicted " + "restoration time of the building.", + "type": "incore:restorationTime" + } + ] + } diff --git a/pyincore/analyses/joplinempiricalrestoration/joplinempirrestor_util.py b/pyincore/analyses/joplinempiricalrestoration/joplinempirrestor_util.py new file mode 100644 index 000000000..a63eda9de --- /dev/null +++ b/pyincore/analyses/joplinempiricalrestoration/joplinempirrestor_util.py @@ -0,0 +1,50 @@ +# Copyright (c) 2021 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + +import numpy as np + + +class JoplinEmpirRestorUtil: + """Utility methods for the Joplin restoration analysis.""" + + # Empirical coefficients mean and sigma for lognormal distribution + FL_COEF = np.zeros((4, 4, 2), dtype=float) + # initial functionality is FL1 + # target functionality is FL0 + FL_COEF[0, 0, :] = [5.90, 0.5] + # other (for consistency) + FL_COEF[1, 1, :] = [0.0, 1.0] + FL_COEF[1, 2, :] = [0.0, 1.0] + FL_COEF[1, 3, :] = [0.0, 1.0] + + # initial functionality is FL2 + # target functionality is FL0 + FL_COEF[1, 0, :] = [6.06, 0.32] + # target functionality is FL1 + FL_COEF[1, 1, :] = [5.87, 0.35] + # other (for consistency) + FL_COEF[1, 2, :] = [0.0, 1.0] + FL_COEF[1, 3, :] = [0.0, 1.0] + + # initial functionality is FL3 + # target functionality is FL0 + FL_COEF[2, 0, :] = [5.90, 0.60] + # target functionality is FL1 + FL_COEF[2, 1, :] = [5.74, 0.55] + # target functionality is FL2 + FL_COEF[2, 2, :] = [5.56, 0.46] + # other (for consistency) + FL_COEF[2, 3, :] = [0.0, 1.0] + + # initial functionality is FL4 + # target functionality is FL0 + FL_COEF[3, 0, :] = [6.60, 0.53] + # target functionality is FL1 + FL_COEF[3, 1, :] = [6.39, 0.48] + # target functionality is FL2 + FL_COEF[3, 2, :] = [6.49, 0.6] + # target functionality is FL3 + FL_COEF[3, 3, :] = [6.13, 0.33] diff --git a/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingdamage.py b/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingdamage.py index 559558045..beef894fc 100644 --- 
a/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingdamage.py +++ b/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingdamage.py @@ -10,7 +10,7 @@ from pyincore import BaseAnalysis, HazardService, FragilityService from pyincore.analyses.nonstructbuildingdamage.nonstructbuildingutil import \ NonStructBuildingUtil -from pyincore.models.fragilitycurverefactored import FragilityCurveRefactored +from pyincore.models.fragilitycurve import FragilityCurve class NonStructBuildingDamage(BaseAnalysis): @@ -203,7 +203,7 @@ def building_damage_analysis_bulk_input(self, buildings): ############### # AS - if isinstance(fragility_set_as.fragility_curves[0], FragilityCurveRefactored): + if isinstance(fragility_set_as.fragility_curves[0], FragilityCurve): hazard_vals_as = AnalysisUtil.update_precision_of_lists(hazard_resp_as[i]["hazardValues"]) demand_types_as = hazard_resp_as[i]["demands"] demand_units_as = hazard_resp_as[i]["units"] @@ -213,8 +213,8 @@ def building_damage_analysis_bulk_input(self, buildings): if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp_as[i]["hazardValues"]): building_args = fragility_set_as.construct_expression_args_from_inventory(building) dmg_probability_as = fragility_set_as. \ - calculate_limit_state_refactored_w_conversion(hval_dict_as, inventory_type="building", - **building_args) + calculate_limit_state(hval_dict_as, inventory_type="building", + **building_args) # adjust dmg probability for liquefaction if use_liquefaction: if liq_geology_dataset_id is not None: @@ -232,7 +232,7 @@ def building_damage_analysis_bulk_input(self, buildings): ############### # DS - if isinstance(fragility_set_ds.fragility_curves[0], FragilityCurveRefactored): + if isinstance(fragility_set_ds.fragility_curves[0], FragilityCurve): hazard_vals_ds = AnalysisUtil.update_precision_of_lists(hazard_resp_ds[i]["hazardValues"]) demand_types_ds = hazard_resp_ds[i]["demands"] demand_units_ds = hazard_resp_ds[i]["units"] @@ -243,8 +243,8 @@ def building_damage_analysis_bulk_input(self, buildings): if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp_ds[i]["hazardValues"]): building_args = fragility_set_ds.construct_expression_args_from_inventory(building) dmg_probability_ds = fragility_set_ds. 
\ - calculate_limit_state_refactored_w_conversion(hval_dict_ds, inventory_type="building", - **building_args) + calculate_limit_state(hval_dict_ds, inventory_type="building", + **building_args) # adjust dmg probability for liquefaction if use_liquefaction: if liq_geology_dataset_id is not None: diff --git a/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingutil.py b/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingutil.py index 0b64ef0ea..21e9f25a5 100644 --- a/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingutil.py +++ b/pyincore/analyses/nonstructbuildingdamage/nonstructbuildingutil.py @@ -48,13 +48,15 @@ def adjust_damage_for_liquefaction(limit_state_probabilities, ground_failure_pro else: prob_ground_failure = ground_failure_probabilities[i] - adjusted_limit_state_probabilities[keys[i]] = limit_state_probabilities[keys[i]] + prob_ground_failure \ - - limit_state_probabilities[keys[i]] * prob_ground_failure + adjusted_limit_state_probabilities[keys[i]] = \ + limit_state_probabilities[keys[i]] + prob_ground_failure \ + - limit_state_probabilities[keys[i]] * prob_ground_failure # the final one is the last of limitStates should match with the last of ground failures j = len(limit_state_probabilities) - 1 prob_ground_failure = ground_failure_probabilities[-1] - adjusted_limit_state_probabilities[keys[j]] = limit_state_probabilities[keys[j]] + prob_ground_failure \ - - limit_state_probabilities[keys[j]] * prob_ground_failure + adjusted_limit_state_probabilities[keys[j]] = \ + limit_state_probabilities[keys[j]] \ + + prob_ground_failure - limit_state_probabilities[keys[j]] * prob_ground_failure return adjusted_limit_state_probabilities diff --git a/pyincore/analyses/pipelinedamage/pipelinedamage.py b/pyincore/analyses/pipelinedamage/pipelinedamage.py index 4f2056456..751eb221c 100644 --- a/pyincore/analyses/pipelinedamage/pipelinedamage.py +++ b/pyincore/analyses/pipelinedamage/pipelinedamage.py @@ -12,7 +12,7 @@ from pyincore import BaseAnalysis, HazardService, FragilityService, \ FragilityCurveSet, AnalysisUtil, GeoUtil -from pyincore.models.fragilitycurverefactored import FragilityCurveRefactored +from pyincore.models.fragilitycurve import FragilityCurve class PipelineDamage(BaseAnalysis): @@ -162,7 +162,7 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type, fragility_set = fragility_sets[pipeline["id"]] # TODO: Once all fragilities are migrated to new format, we can remove this condition - if isinstance(fragility_set.fragility_curves[0], FragilityCurveRefactored): + if isinstance(fragility_set.fragility_curves[0], FragilityCurve): # Supports multiple demand types in same fragility haz_vals = AnalysisUtil.update_precision_of_lists(hazard_vals[i]["hazardValues"]) demand_types = hazard_vals[i]["demands"] @@ -175,9 +175,9 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type, if not AnalysisUtil.do_hazard_values_have_errors(hazard_vals[i]["hazardValues"]): pipeline_args = fragility_set.construct_expression_args_from_inventory(pipeline) - limit_states = fragility_set.calculate_limit_state_refactored_w_conversion(hval_dict, - inventory_type="pipeline", - **pipeline_args) + limit_states = fragility_set.calculate_limit_state(hval_dict, + inventory_type="pipeline", + **pipeline_args) dmg_intervals = fragility_set.calculate_damage_interval(limit_states, hazard_type=hazard_type, inventory_type="pipeline") diff --git a/pyincore/analyses/pipelinedamagerepairrate/pipelinedamagerepairrate.py 
b/pyincore/analyses/pipelinedamagerepairrate/pipelinedamagerepairrate.py index 2a06694d3..a8ac64d75 100644 --- a/pyincore/analyses/pipelinedamagerepairrate/pipelinedamagerepairrate.py +++ b/pyincore/analyses/pipelinedamagerepairrate/pipelinedamagerepairrate.py @@ -18,7 +18,7 @@ AnalysisUtil, GeoUtil from pyincore.analyses.pipelinedamagerepairrate.pipelineutil import \ PipelineUtil -from pyincore.models.fragilitycurverefactored import FragilityCurveRefactored +from pyincore.models.fragilitycurve import FragilityCurve class PipelineDamageRepairRate(BaseAnalysis): @@ -226,7 +226,7 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type, fragility_curve = fragility_set.fragility_curves[0] # TODO: Once all fragilities are migrated to new format, we can remove this condition - if isinstance(fragility_set.fragility_curves[0], FragilityCurveRefactored): + if isinstance(fragility_set.fragility_curves[0], FragilityCurve): hazard_vals = AnalysisUtil.update_precision_of_lists(hazard_resp[i]["hazardValues"]) demand_types = hazard_resp[i]["demands"] demand_units = hazard_resp[i]["units"] @@ -237,9 +237,9 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type, if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]): pipeline_args = fragility_set.construct_expression_args_from_inventory(pipeline) - pgv_repairs = fragility_curve.calculate_limit_state_probability(hval_dict, - fragility_set.fragility_curve_parameters, - **pipeline_args) + pgv_repairs = \ + fragility_curve.calculate_limit_state_probability( + hval_dict, fragility_set.fragility_curve_parameters, **pipeline_args) # Convert PGV repairs to SI units pgv_repairs = PipelineUtil.convert_result_unit(fragility_curve.return_type["unit"], pgv_repairs) @@ -254,7 +254,7 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type, liq_fragility_curve = fragility_set_liq.fragility_curves[0] # TODO: Once all fragilities are migrated to new format, we can remove this condition - if isinstance(fragility_set_liq.fragility_curves[0], FragilityCurveRefactored): + if isinstance(fragility_set_liq.fragility_curves[0], FragilityCurve): liq_hazard_vals = AnalysisUtil.update_precision_of_lists(liquefaction_resp[i]["pgdValues"]) liq_demand_types = liquefaction_resp[i]["demands"] liq_demand_units = liquefaction_resp[i]["units"] @@ -266,9 +266,9 @@ def pipeline_damage_analysis_bulk_input(self, pipelines, hazard_type, # !important! 
removing the liqProbability and passing in the "diameter" # no fragility is actually using liqProbability pipeline_args = fragility_set_liq.construct_expression_args_from_inventory(pipeline) - pgd_repairs = liq_fragility_curve.calculate_limit_state_probability(liq_hval_dict, - fragility_set_liq.fragility_curve_parameters, - **pipeline_args) + pgd_repairs = \ + liq_fragility_curve.calculate_limit_state_probability( + liq_hval_dict, fragility_set_liq.fragility_curve_parameters, **pipeline_args) # Convert PGD repairs to SI units pgd_repairs = PipelineUtil.convert_result_unit(liq_fragility_curve.return_type["unit"], pgd_repairs) diff --git a/pyincore/analyses/populationdislocation/populationdislocation.py b/pyincore/analyses/populationdislocation/populationdislocation.py index fc024807a..66a168908 100644 --- a/pyincore/analyses/populationdislocation/populationdislocation.py +++ b/pyincore/analyses/populationdislocation/populationdislocation.py @@ -198,8 +198,7 @@ def get_dislocation(self, seed_i: int, inventory: pd.DataFrame, value_loss: pd.D # total_prob_disl is the sum of the probability of dislocation at four damage states # times the probability of being in that damage state. - total_prob_disl = prob0_disl * prob0 + prob1_disl * prob1 + prob2_disl * prob2 + \ - prob3_disl * prob3 + total_prob_disl = prob0_disl * prob0 + prob1_disl * prob1 + prob2_disl * prob2 + prob3_disl * prob3 inventory["prdis"] = total_prob_disl diff --git a/pyincore/analyses/residentialbuildingrecovery/residentialbuildingrecovery.py b/pyincore/analyses/residentialbuildingrecovery/residentialbuildingrecovery.py index 6e8b87496..86a56bb3d 100644 --- a/pyincore/analyses/residentialbuildingrecovery/residentialbuildingrecovery.py +++ b/pyincore/analyses/residentialbuildingrecovery/residentialbuildingrecovery.py @@ -155,8 +155,8 @@ def household_income_prediction(income_groups, num_samples): group_hhinc_values = group['hhinc'].values # Compute normal ddistribution parameters from group data - mean = np.mean(group_hhinc_values) - std = np.std(group_hhinc_values) + mean = np.nanmean(group_hhinc_values) + std = np.nanstd(group_hhinc_values) if np.isnan(mean): mean = 3 @@ -166,7 +166,7 @@ def household_income_prediction(income_groups, num_samples): # Directly compute the indices of NaN values in the hhinc vector group_nan_idx = np.where(np.isnan(group_hhinc_values)) - number_nan = len(group_nan_idx) + number_nan = len(group_nan_idx[0]) # Now, generate a numpy matrix to hold the samples for the group group_samples = np.zeros((num_samples, group_size)) @@ -228,10 +228,10 @@ def household_aggregation(household_income_predictions): group_new = pd.DataFrame(no_guids_maxima, columns=colnames, index=group.index) group_new.insert(0, 'guid', local_guids) - new_groups.append(group_new) + new_groups.append(group_new.head(1)) # Construct a new DataFrame - household_aggregation_results = pd.concat(new_groups).sort_index() + household_aggregation_results = pd.concat(new_groups).reset_index(drop=True) return household_aggregation_results diff --git a/pyincore/analyses/roaddamage/roaddamage.py b/pyincore/analyses/roaddamage/roaddamage.py index 759127e0f..4cbd3b59a 100644 --- a/pyincore/analyses/roaddamage/roaddamage.py +++ b/pyincore/analyses/roaddamage/roaddamage.py @@ -9,7 +9,7 @@ from itertools import repeat from pyincore import BaseAnalysis, HazardService, FragilityService, AnalysisUtil, GeoUtil -from pyincore.models.fragilitycurverefactored import FragilityCurveRefactored +from pyincore.models.fragilitycurve import FragilityCurve class 
RoadDamage(BaseAnalysis): @@ -191,7 +191,7 @@ def road_damage_analysis_bulk_input(self, roads, hazard_type, hazard_dataset_id, if use_hazard_uncertainty: raise ValueError("Uncertainty Not Implemented Yet.") - if isinstance(selected_fragility_set.fragility_curves[0], FragilityCurveRefactored): + if isinstance(selected_fragility_set.fragility_curves[0], FragilityCurve): hazard_vals = AnalysisUtil.update_precision_of_lists(hazard_resp[i]["hazardValues"]) demand_types = hazard_resp[i]["demands"] demand_units = hazard_resp[i]["units"] @@ -201,7 +201,7 @@ def road_damage_analysis_bulk_input(self, roads, hazard_type, hazard_dataset_id, if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]): road_args = selected_fragility_set.construct_expression_args_from_inventory(road) - dmg_probability = selected_fragility_set.calculate_limit_state_refactored_w_conversion( + dmg_probability = selected_fragility_set.calculate_limit_state( hval_dict, inventory_type='road', **road_args) # if there is liquefaction, overwrite the hazardval with liquefaction value @@ -214,12 +214,11 @@ def road_damage_analysis_bulk_input(self, roads, hazard_type, hazard_dataset_id, liq_hval_dict = dict() for j, d in enumerate(liquefaction_resp[i]["demands"]): liq_hval_dict[d] = liq_hazard_vals[j] - dmg_probability = selected_fragility_set.calculate_limit_state_refactored_w_conversion( + dmg_probability = selected_fragility_set.calculate_limit_state( liq_hval_dict, inventory_type='road', **road_args) - dmg_interval = selected_fragility_set.calculate_damage_interval(dmg_probability, hazard_type=hazard_type, inventory_type="road") diff --git a/pyincore/analyses/tornadoepndamage/tornadoepndamage.py b/pyincore/analyses/tornadoepndamage/tornadoepndamage.py index 23a62327b..4df1a7296 100644 --- a/pyincore/analyses/tornadoepndamage/tornadoepndamage.py +++ b/pyincore/analyses/tornadoepndamage/tornadoepndamage.py @@ -15,7 +15,7 @@ from pyincore import BaseAnalysis, HazardService, FragilityService, DataService, FragilityCurveSet from pyincore import GeoUtil, NetworkUtil -from pyincore.models.fragilitycurverefactored import FragilityCurveRefactored +from pyincore.models.fragilitycurve import FragilityCurve class TornadoEpnDamage(BaseAnalysis): @@ -305,11 +305,11 @@ def get_damage(self, node_dataset, link_dataset, tornado_dataset, tornado_id): hval_dict[d] = tor_hazard_values[j] j += 1 if isinstance(fragility_set_used.fragility_curves[0], - FragilityCurveRefactored): + FragilityCurve): inventory_args = fragility_set_used.construct_expression_args_from_inventory( tornado_feature) resistivity_probability = \ - fragility_set_used.calculate_limit_state_refactored_w_conversion( + fragility_set_used.calculate_limit_state( hval_dict, inventory_type=fragility_set_used.inventory_type, **inventory_args) else: diff --git a/pyincore/analyses/waterfacilitydamage/waterfacilitydamage.py b/pyincore/analyses/waterfacilitydamage/waterfacilitydamage.py index 6a1158769..4c0b7b9cb 100644 --- a/pyincore/analyses/waterfacilitydamage/waterfacilitydamage.py +++ b/pyincore/analyses/waterfacilitydamage/waterfacilitydamage.py @@ -14,7 +14,7 @@ from pyincore import BaseAnalysis, HazardService, FragilityService, GeoUtil, \ AnalysisUtil -from pyincore.models.fragilitycurverefactored import FragilityCurveRefactored +from pyincore.models.fragilitycurve import FragilityCurve class WaterFacilityDamage(BaseAnalysis): @@ -241,7 +241,7 @@ def waterfacilityset_damage_analysis_bulk_input(self, facilities, hazard_type, if uncertainty: hazard_std_dev = 
random.random() - if isinstance(fragility_set.fragility_curves[0], FragilityCurveRefactored): + if isinstance(fragility_set.fragility_curves[0], FragilityCurve): hazard_vals = AnalysisUtil.update_precision_of_lists(hazard_resp[i]["hazardValues"]) demand_types = hazard_resp[i]["demands"] demand_units = hazard_resp[i]["units"] @@ -254,15 +254,15 @@ def waterfacilityset_damage_analysis_bulk_input(self, facilities, hazard_type, if not AnalysisUtil.do_hazard_values_have_errors(hazard_resp[i]["hazardValues"]): facility_args = fragility_set.construct_expression_args_from_inventory(facility) limit_states = \ - fragility_set.calculate_limit_state_refactored_w_conversion(hval_dict, - std_dev=hazard_std_dev, - inventory_type='water_facility', - **facility_args) + fragility_set.calculate_limit_state(hval_dict, + std_dev=hazard_std_dev, + inventory_type='water_facility', + **facility_args) # Evaluate liquefaction: if it is not none, then liquefaction is available if liquefaction_resp is not None: fragility_set_liq = fragility_sets_liq[facility["id"]] - if isinstance(fragility_set_liq.fragility_curves[0], FragilityCurveRefactored): + if isinstance(fragility_set_liq.fragility_curves[0], FragilityCurve): liq_hazard_vals = AnalysisUtil.update_precision_of_lists(liquefaction_resp[i]["pgdValues"]) liq_demand_types = liquefaction_resp[i]["demands"] liq_demand_units = liquefaction_resp[i]["units"] @@ -275,17 +275,18 @@ def waterfacilityset_damage_analysis_bulk_input(self, facilities, hazard_type, facility_liq_args = fragility_set_liq.construct_expression_args_from_inventory(facility) pgd_limit_states = \ - fragility_set_liq.calculate_limit_state_refactored_w_conversion( - hval_dict_liq,std_dev=hazard_std_dev,inventory_type="water_facility", + fragility_set_liq.calculate_limit_state( + hval_dict_liq, std_dev=hazard_std_dev, inventory_type="water_facility", **facility_liq_args) else: - raise ValueError("One of the fragilities is in deprecated format. This should not happen. " - "If you are seeing this please report the issue.") + raise ValueError("One of the fragilities is in deprecated format. " + "This should not happen If you are seeing this please report the issue.") limit_states = AnalysisUtil.adjust_limit_states_for_pgd(limit_states, pgd_limit_states) - dmg_intervals = fragility_set.calculate_damage_interval(limit_states, hazard_type=hazard_type, - inventory_type='water_facility') + dmg_intervals = fragility_set.calculate_damage_interval(limit_states, + hazard_type=hazard_type, + inventory_type='water_facility') else: raise ValueError("One of the fragilities is in deprecated format. This should not happen. 
If you are " "seeing this please report the issue.") diff --git a/pyincore/baseanalysis.py b/pyincore/baseanalysis.py index 7f395479c..40b85ffd4 100644 --- a/pyincore/baseanalysis.py +++ b/pyincore/baseanalysis.py @@ -224,12 +224,12 @@ def set_result_csv_data(self, result_id, result_data, name, source='file'): if not name.endswith(".csv"): name = name + ".csv" + dataset_type = self.output_datasets[result_id]["spec"]["type"] if source == 'file': - dataset = Dataset.from_csv_data(result_data, name) + dataset = Dataset.from_csv_data(result_data, name, dataset_type) elif source == 'dataframe': - dataset = Dataset.from_dataframe(result_data, name) + dataset = Dataset.from_dataframe(result_data, name, dataset_type) - dataset.data_type = self.output_datasets[result_id]["spec"]["type"] self.set_output_dataset(result_id, dataset) def set_result_json_data(self, result_id, result_data, name, source='file'): @@ -239,10 +239,10 @@ def set_result_json_data(self, result_id, result_data, name, source='file'): if not name.endswith(".json"): name = name + ".json" + dataset_type = self.output_datasets[result_id]["spec"]["type"] if source == 'file': - dataset = Dataset.from_json_data(result_data, name) + dataset = Dataset.from_json_data(result_data, name, dataset_type) - dataset.data_type = self.output_datasets[result_id]["spec"]["type"] self.set_output_dataset(result_id, dataset) def run_analysis(self): diff --git a/pyincore/client.py b/pyincore/client.py index d4a589bf5..e34107f7e 100644 --- a/pyincore/client.py +++ b/pyincore/client.py @@ -104,11 +104,10 @@ def return_http_response(http_response): http_response.raise_for_status() return http_response except requests.exceptions.HTTPError: - logger.error('HTTPError: The server returned a ' - + str(http_response.status_code) + ' failure response code. You can ' - 'find information about HTTP response ' - 'status codes here: ' - 'https://developer.mozilla.org/en-US/docs/Web/HTTP/Status') + logger.error('A HTTPError has occurred \n' + + 'HTTP Status code: ' + str(http_response.status_code) + '\n' + + 'Error Message: ' + http_response.content.decode() + ) raise except requests.exceptions.ConnectionError: logger.error("ConnectionError: Failed to establish a connection with the server. " diff --git a/pyincore/dataset.py b/pyincore/dataset.py index d84383d71..da51932ab 100644 --- a/pyincore/dataset.py +++ b/pyincore/dataset.py @@ -108,27 +108,29 @@ def from_file(cls, file_path, data_type): return instance @classmethod - def from_dataframe(cls, dataframe, name): + def from_dataframe(cls, dataframe, name, data_type): """Get Dataset from Panda's DataFrame. Args: dataframe (obj): Panda's DataFrame. name (str): filename. + data_type (str): Incore data type, e.g. incore:xxxx or ergo:xxxx Returns: obj: Dataset from file. """ dataframe.to_csv(name, index=False) - return Dataset.from_file(name, "csv") + return Dataset.from_file(name, data_type) @classmethod - def from_csv_data(cls, result_data, name): + def from_csv_data(cls, result_data, name, data_type): """Get Dataset from CSV data. Args: result_data (obj): Result data and metadata. name (str): A CSV filename. + data_type (str): Incore data type, e.g. incore:xxxx or ergo:xxxx Returns: obj: Dataset from file. 
@@ -140,15 +142,16 @@ def from_csv_data(cls, result_data, name): writer = csv.DictWriter(csv_file, dialect="unix", fieldnames=result_data[0].keys()) writer.writeheader() writer.writerows(result_data) - return Dataset.from_file(name, "csv") + return Dataset.from_file(name, data_type) @classmethod - def from_json_data(cls, result_data, name): + def from_json_data(cls, result_data, name, data_type): """Get Dataset from JSON data. Args: result_data (obj): Result data and metadata. name (str): A JSON filename. + data_type (str): Incore data type, e.g. incore:xxxx or ergo:xxxx Returns: obj: Dataset from file. @@ -158,7 +161,7 @@ def from_json_data(cls, result_data, name): with open(name, 'w') as json_file: json_dumps_str = json.dumps(result_data, indent=4) json_file.write(json_dumps_str) - return Dataset.from_file(name, "json") + return Dataset.from_file(name, data_type) def cache_files(self, data_service: DataService): """Get the set of fragility data, curves. diff --git a/pyincore/globals.py b/pyincore/globals.py index 559d6642f..dfa90e3d4 100644 --- a/pyincore/globals.py +++ b/pyincore/globals.py @@ -10,7 +10,7 @@ import os import shutil -PACKAGE_VERSION = "1.0.0" +PACKAGE_VERSION = "1.1.0" INCORE_API_PROD_URL = "https://incore.ncsa.illinois.edu" INCORE_API_DEV_URL = "https://incore-dev.ncsa.illinois.edu" diff --git a/pyincore/hazardservice.py b/pyincore/hazardservice.py index 4bd6e4b15..333d42e52 100644 --- a/pyincore/hazardservice.py +++ b/pyincore/hazardservice.py @@ -78,8 +78,6 @@ def get_earthquake_hazard_metadata(self, hazard_id: str): return response - - def get_earthquake_hazard_value_set(self, hazard_id: str, demand_type: str, demand_unit: str, bbox, grid_spacing: float): diff --git a/pyincore/models/conditionalstandardfragilitycurve.py b/pyincore/models/conditionalstandardfragilitycurve.py deleted file mode 100644 index f5a99ba15..000000000 --- a/pyincore/models/conditionalstandardfragilitycurve.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (c) 2019 University of Illinois and others. All rights reserved. -# -# This program and the accompanying materials are made available under the -# terms of the Mozilla Public License v2.0 which accompanies this distribution, -# and is available at https://www.mozilla.org/en-US/MPL/2.0/ - - -import math - -from scipy.stats import norm - -from pyincore.models.fragilitycurve import FragilityCurve - - -class ConditionalStandardFragilityCurve(FragilityCurve): - """ - A class to represent conditional standard fragility curve. - """ - - def __init__(self, curve_parameters): - self.alpha = curve_parameters['alpha'] - self.beta = curve_parameters['beta'] - self.alpha_type = curve_parameters['alphaType'] - self.curve_type = curve_parameters['curveType'] - self.rules = curve_parameters['rules'] - - super(ConditionalStandardFragilityCurve, self).__init__(curve_parameters) - - def calculate_limit_state_probability(self, hazard, period: float = 0.0, std_dev: float = 0.0, **kwargs): - """Computes limit state probabilities. - - Args: - hazard (float): A hazard value to compute probability for. - period (float): A building period default to 0. - std_dev (float): A standard deviation. - **kwargs: Keyword arguments. - - Returns: - float: A limit state probability. 
- - """ - probability = float(0.0) - if hazard > 0.0: - index = ConditionalStandardFragilityCurve._fragility_curve_rules_match(self.rules, hazard) - if index is not None: - alpha = float(self.alpha[index]) - beta = math.sqrt(math.pow(self.beta[index], 2) + math.pow(std_dev, 2)) - - if self.alpha_type == 'median': - sp = (math.log(hazard) - math.log(alpha)) / beta - probability = norm.cdf(sp) - elif self.alpha_type == "lambda": - x = (math.log(hazard) - alpha) / beta - probability = norm.cdf(x) - else: - raise ValueError("No matching rule has been found in this conditonal standard fragility curve. " - "Please verify it's the right curve to use.") - - return probability - - @staticmethod - def _fragility_curve_rules_match(rules, value): - """Given value and rules; decide which index to use. - - Args: - rules (dict): Index: ["rule1", "rule2"...]. - value (float): A value to be evaluated against. - - Returns: - int: index (which used to decide which pair of alpha and beta to use) - """ - - # add more operators if needed - known_operators = { - "EQ": "==", - "EQUALS": "==", - "NEQUALS": "!=", - "GT": ">", - "GE": ">=", - "LT": "<", - "LE": "<=", - } - - # if rules is [[]] meaning it matches without any condition - for index, rule in rules.items(): - # TODO: for now assuming only one rule; in the future need to consider if it's a range - # TODO: eg. demand GT 3, demand LT 4 - # TODO: for now default it's always using the hazard value as the rule_key - - # the format of a rule is always: rule_key + rule_operator + rule_value - elements = rule[0].split(" ", 2) - - rule_operator = elements[1] - if rule_operator not in known_operators.keys(): - raise ValueError(rule_operator + " Unknown. Cannot parse the rules of this mapping!") - - rule_value = elements[2] - - matched = eval(str(value) + known_operators[rule_operator] + rule_value) - if matched: - return int(index) - - return None diff --git a/pyincore/models/customexpressionfragilitycurve.py b/pyincore/models/customexpressionfragilitycurve.py deleted file mode 100644 index 8c7fb2806..000000000 --- a/pyincore/models/customexpressionfragilitycurve.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2019 University of Illinois and others. All rights reserved. -# -# This program and the accompanying materials are made available under the -# terms of the Mozilla Public License v2.0 which accompanies this distribution, -# and is available at https://www.mozilla.org/en-US/MPL/2.0/ - - -from pyincore import Parser - -from pyincore.models.fragilitycurve import FragilityCurve - - -class CustomExpressionFragilityCurve(FragilityCurve): - """A class to represent custom expression fragility curve""" - - def __init__(self, curve_parameters): - self.expression = curve_parameters['expression'] - - super(CustomExpressionFragilityCurve, self).__init__(curve_parameters) - - def calculate_limit_state_probability(self, hazard, period: float = 0.0, std_dev: float = 0.0, **kwargs): - """ - - Args: - hazard (float): A hazard value. - period (float): A period value. - std_dev (float): A standard deviation. - **kwargs: Keyword arguments. - - Raises: - ValueError: If this limit state calculation method is not supported. - - """ - raise ValueError("Custom Expression Fragility Curve does not support this limit state calculation method. " - "Please use compute_custom_limit_state_probability(variables) instead!") - - def compute_custom_limit_state_probability(self, variables: dict): - """Computes custom limit state probabilities. - - Args: - variables (dict): Variables to set. 
- - Returns: - float: A limit state probability. - - """ - expression = self.expression - parser = Parser() - probability = parser.parse(expression).evaluate(variables) - - return probability diff --git a/pyincore/models/fragilitycurve.py b/pyincore/models/fragilitycurve.py index a5dea06c2..3045d399d 100644 --- a/pyincore/models/fragilitycurve.py +++ b/pyincore/models/fragilitycurve.py @@ -1,68 +1,131 @@ -# Copyright (c) 2019 University of Illinois and others. All rights reserved. +# Copyright (c) 2020 University of Illinois and others. All rights reserved. # # This program and the accompanying materials are made available under the # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ +import json +import math +from abc import ABC -from deprecated.sphinx import deprecated +from pyincore import globals as pyglobals +from pyincore.utils import evaluateexpression +logger = pyglobals.LOGGER -class FragilityCurve: - """Abstract class for fragility curves.""" + +class FragilityCurve(ABC): + """A class to represent conditional standard fragility curve.""" def __init__(self, curve_parameters): + self.rules = curve_parameters['rules'] + self.return_type = curve_parameters['returnType'] + + for rule in self.rules: + rule["expression"] = rule["expression"].replace("^", "**") self.description = curve_parameters['description'] - def calculate_limit_state_probability(self, hazard, period: float = 0.0, std_dev: float = 0.0, **kwargs): + def calculate_limit_state_probability(self, hazard_values: dict, fragility_curve_parameters: dict, **kwargs): """Computes limit state probabilities. Args: - hazard (float): A hazard value. - period (float): A period value. - std_dev (float): A standard deviation. + hazard_values (dict): Hazard values. + fragility_curve_parameters (dict): Fragility curve parameters. **kwargs: Keyword arguments. Returns: - float: Limit state probability. + float: A limit state probability. """ - probability = float(0.0) - return probability - - @deprecated(version="0.9.7", reason="This method is already incorporated in refactored fragility curves and will " - "be deprecated") - def adjust_fragility_for_liquefaction(self, liquefaction: str): - """Adjusts fragility curve object by input parameter liquefaction. + parameters = {} + mapped_demand_types = {} + # For all curve parameters: + # 1. Figure out if parameter name needs to be mapped (i.e. the name contains forbidden characters) + # 2. Fetch all parameters listed in the curve from kwargs and if there are not in kwargs, use default values + # from the curve. + for parameter in fragility_curve_parameters: + # if default exists, use default + if "expression" in parameter and parameter["expression"] is not None: + parameters[parameter["name"]] = evaluateexpression.evaluate(parameter["expression"], parameters) + else: + parameters[parameter["name"]] = None + + # e.g. 
map point_two_sec_sa to its full name (0.2 Sec Sa) + if "fullName" in parameter and parameter["fullName"] is not None: + mapped_demand_types[parameter["fullName"]] = parameter["name"] + + # else overwrite with real values; make sure it handles case sensitivity + for kwargs_key, kwargs_value in kwargs.items(): + if "fullName" in parameter and parameter["fullName"] is not None: + if parameter["fullName"].lower() == kwargs_key.lower(): + parameters[parameter["name"]] = kwargs_value + elif parameter["name"].lower() == kwargs_key.lower(): + parameters[parameter["name"]] = kwargs_value + + probability = 0.0 + + # use hazard values if present + # consider case insensitive situation + for key, value in hazard_values.items(): + if key in mapped_demand_types: + key = mapped_demand_types[key] + for parameter_key in parameters.keys(): + if parameter_key.lower() == key.lower(): + if value is not None: + parameters[parameter_key] = value + else: + # returning 0 even if a single demand value is None, assumes there is no hazard exposure. TBD + return probability + + for rule in self.rules: + if "condition" not in rule or rule["condition"] is None: + probability = evaluateexpression.evaluate(rule["expression"], parameters) + else: + conditions_met = [] + for condition in rule["condition"]: + if evaluateexpression.evaluate(condition, parameters): + conditions_met.append(True) + else: + conditions_met.append(False) + break + if all(conditions_met): + probability = evaluateexpression.evaluate(rule["expression"], parameters) + break + + if math.isnan(probability): + error_msg = "Unable to calculate limit state." + if self.rules: + error_msg += " Evaluation failed for expression: \n" + json.dumps(self.rules) + "\n" + error_msg += "Provided Inputs: \n" + json.dumps(hazard_values) + "\n" + json.dumps(kwargs) + + raise ValueError(error_msg) - Args: - liquefaction (str): Liquefaction type. - - """ - raise NotImplementedError("This function is currently only applied to Standard Fragility Curve, " - "and Period Standard Fragility Curve") + return probability def get_building_period(self, num_stories): """Get building period from the fragility curve. - Args: num_stories (int): Number of building stories. - Returns: float: Building period. - """ period = 0.0 return period - def compute_custom_limit_state_probability(self, variables: dict): - """Computes custom limit state probabilities. - - Args: - variables (dict): Variables to set. - - Returns: - float: A limit state probability. 
- - """ - probability = float(0.0) - return probability + # def get_building_period(self, fragility_curve_parameters, **kwargs): + # period = 0.0 + # num_stories = 1.0 + # for parameter in fragility_curve_parameters: + # # if default exists, use default + # if parameter["name"] == "num_stories" and "expression" in parameter and parameter["expression"] is not None: + # num_stories = evaluateexpression.evaluate(parameter["expression"]) + # + # # if exist in building inventory + # for kwargs_key, kwargs_value in kwargs.items(): + # if kwargs_key.lower() == "num_stories" and kwargs_value is not None and kwargs_value > 0: + # num_stories = kwargs_value + # + # # calculate period + # if parameter["name"] == "period" and "expression" in parameter and parameter["expression"] is not None: + # period = evaluateexpression.evaluate(parameter["expression"], {"num_stories": num_stories}) + # + # return period diff --git a/pyincore/models/fragilitycurverefactored.py b/pyincore/models/fragilitycurverefactored.py deleted file mode 100644 index de53f7a9f..000000000 --- a/pyincore/models/fragilitycurverefactored.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) 2020 University of Illinois and others. All rights reserved. -# -# This program and the accompanying materials are made available under the -# terms of the Mozilla Public License v2.0 which accompanies this distribution, -# and is available at https://www.mozilla.org/en-US/MPL/2.0/ -import json -import math -from abc import ABC - -from pyincore import globals as pyglobals -from pyincore.utils import evaluateexpression -from pyincore.models.fragilitycurve import FragilityCurve - -logger = pyglobals.LOGGER - - -class FragilityCurveRefactored(FragilityCurve, ABC): - """A class to represent conditional standard fragility curve.""" - - def __init__(self, curve_parameters): - self.rules = curve_parameters['rules'] - self.return_type = curve_parameters['returnType'] - - for rule in self.rules: - rule["expression"] = rule["expression"].replace("^", "**") - - super(FragilityCurveRefactored, self).__init__(curve_parameters) - - def calculate_limit_state_probability(self, hazard_values: dict, fragility_curve_parameters: dict, **kwargs): - """Computes limit state probabilities. - - Args: - hazard_values (dict): Hazard values. - fragility_curve_parameters (dict): Fragility curve parameters. - **kwargs: Keyword arguments. - - Returns: - float: A limit state probability. - - """ - parameters = {} - mapped_demand_types = {} - # For all curve parameters: - # 1. Figure out if parameter name needs to be mapped (i.e. the name contains forbidden characters) - # 2. Fetch all parameters listed in the curve from kwargs and if there are not in kwargs, use default values - # from the curve. - for parameter in fragility_curve_parameters: - # if default exists, use default - if "expression" in parameter and parameter["expression"] is not None: - parameters[parameter["name"]] = evaluateexpression.evaluate(parameter["expression"], parameters) - else: - parameters[parameter["name"]] = None - - # e.g. 
map point_two_sec_sa to its full name (0.2 Sec Sa) - if "fullName" in parameter and parameter["fullName"] is not None: - mapped_demand_types[parameter["fullName"]] = parameter["name"] - - # else overwrite with real values; make sure it handles case sensitivity - for kwargs_key, kwargs_value in kwargs.items(): - if "fullName" in parameter and parameter["fullName"] is not None: - if parameter["fullName"].lower() == kwargs_key.lower(): - parameters[parameter["name"]] = kwargs_value - elif parameter["name"].lower() == kwargs_key.lower(): - parameters[parameter["name"]] = kwargs_value - - probability = 0.0 - - # use hazard values if present - # consider case insensitive situation - for key, value in hazard_values.items(): - if key in mapped_demand_types: - key = mapped_demand_types[key] - for parameter_key in parameters.keys(): - if parameter_key.lower() == key.lower(): - if value is not None: - parameters[parameter_key] = value - else: - # returning 0 even if a single demand value is None, assumes there is no hazard exposure. TBD - return probability - - for rule in self.rules: - if "condition" not in rule or rule["condition"] is None: - probability = evaluateexpression.evaluate(rule["expression"], parameters) - else: - conditions_met = [] - for condition in rule["condition"]: - if evaluateexpression.evaluate(condition, parameters): - conditions_met.append(True) - else: - conditions_met.append(False) - break - if all(conditions_met): - probability = evaluateexpression.evaluate(rule["expression"], parameters) - break - - if math.isnan(probability): - error_msg = "Unable to calculate limit state." - if self.rules: - error_msg += " Evaluation failed for expression: \n" + json.dumps(self.rules) + "\n" - error_msg += "Provided Inputs: \n" + json.dumps(hazard_values) + "\n" + json.dumps(kwargs) - - raise ValueError(error_msg) - - return probability - - # def get_building_period(self, fragility_curve_parameters, **kwargs): - # period = 0.0 - # num_stories = 1.0 - # for parameter in fragility_curve_parameters: - # # if default exists, use default - # if parameter["name"] == "num_stories" and "expression" in parameter and parameter["expression"] is not None: - # num_stories = evaluateexpression.evaluate(parameter["expression"]) - # - # # if exist in building inventory - # for kwargs_key, kwargs_value in kwargs.items(): - # if kwargs_key.lower() == "num_stories" and kwargs_value is not None and kwargs_value > 0: - # num_stories = kwargs_value - # - # # calculate period - # if parameter["name"] == "period" and "expression" in parameter and parameter["expression"] is not None: - # period = evaluateexpression.evaluate(parameter["expression"], {"num_stories": num_stories}) - # - # return period diff --git a/pyincore/models/fragilitycurveset.py b/pyincore/models/fragilitycurveset.py index 1b26641ca..0ed88d5ba 100644 --- a/pyincore/models/fragilitycurveset.py +++ b/pyincore/models/fragilitycurveset.py @@ -4,19 +4,9 @@ # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ -import collections import json -from deprecated.sphinx import deprecated - -from pyincore.models.customexpressionfragilitycurve import CustomExpressionFragilityCurve from pyincore.models.fragilitycurve import FragilityCurve -from pyincore.models.periodbuildingfragilitycurve import PeriodBuildingFragilityCurve -from pyincore.models.periodstandardfragilitycurve import PeriodStandardFragilityCurve -from pyincore.models.standardfragilitycurve import 
StandardFragilityCurve -from pyincore.models.conditionalstandardfragilitycurve import ConditionalStandardFragilityCurve -from pyincore.models.parametricfragilitycurve import ParametricFragilityCurve -from pyincore.models.fragilitycurverefactored import FragilityCurveRefactored from pyincore.utils.analysisutil import AnalysisUtil @@ -52,29 +42,7 @@ def __init__(self, metadata): if 'fragilityCurves' in metadata.keys(): for fragility_curve in metadata["fragilityCurves"]: - - # if it's already an df3curve object, directly put it in the list: - if isinstance(fragility_curve, FragilityCurve): - self.fragility_curves.append(fragility_curve) - # based on what type of fragility_curve it is, instantiate different fragility curve object - else: - if fragility_curve['className'] == 'StandardFragilityCurve': - self.fragility_curves.append(StandardFragilityCurve(fragility_curve)) - elif fragility_curve['className'] == 'PeriodBuildingFragilityCurve': - self.fragility_curves.append(PeriodBuildingFragilityCurve(fragility_curve)) - elif fragility_curve['className'] == 'PeriodStandardFragilityCurve': - self.fragility_curves.append(PeriodStandardFragilityCurve(fragility_curve)) - elif fragility_curve['className'] == 'CustomExpressionFragilityCurve': - self.fragility_curves.append(CustomExpressionFragilityCurve(fragility_curve)) - elif fragility_curve['className'] == 'ConditionalStandardFragilityCurve': - self.fragility_curves.append(ConditionalStandardFragilityCurve(fragility_curve)) - elif fragility_curve['className'] == 'ParametricFragilityCurve': - self.fragility_curves.append(ParametricFragilityCurve(fragility_curve)) - elif fragility_curve['className'] == 'FragilityCurveRefactored': - self.fragility_curves.append(FragilityCurveRefactored(fragility_curve)) - else: - # TODO make a custom fragility curve class that accept whatever - self.fragility_curves.append(fragility_curve) + self.fragility_curves.append(FragilityCurve(fragility_curve)) elif 'repairCurves' in metadata.keys(): self.repairCurves = metadata['repairCurves'] elif 'restorationCurves' in metadata.keys(): @@ -111,75 +79,9 @@ def from_json_file(cls, file_path): return instance - @deprecated(version="0.9.0", reason="use calculate_limit_state_w_conversion instead") - def calculate_limit_state(self, hazard, period: float = 0.0, std_dev: float = 0.0, **kwargs): - """Computes limit state probabilities. - - Args: - hazard (float): A hazard value to compute probability for. - period (float): A period of the structure, if applicable. - std_dev (float): A standard deviation. - **kwargs: Keyword arguments. - - Returns: - OrderedDict: Limit state probabilities. 
- - """ - output = collections.OrderedDict() - index = 0 - - if len(self.fragility_curves) == 1: - limit_state = ['failure'] - elif len(self.fragility_curves) == 3: - limit_state = ['immocc', 'lifesfty', 'collprev'] - elif len(self.fragility_curves) == 4: - limit_state = ['ls-slight', 'ls-moderat', 'ls-extensi', 'ls-complet'] - else: - raise ValueError("We can only handle fragility curves with 1, 3 or 4 limit states!") - - for fragility_curve in self.fragility_curves: - probability = fragility_curve.calculate_limit_state_probability(hazard, period, std_dev, **kwargs) - output[limit_state[index]] = AnalysisUtil.update_precision(probability) # round to default digits - index += 1 - - return output - - @deprecated(version="0.9.0", reason="use calculate_limit_state_refactored_w_conversion instead") - def calculate_limit_state_refactored(self, hazard_values: dict = {}, **kwargs): - """WIP computation of limit state probabilities accounting for custom expressions. - - Args: - hazard_values (dict): A dictionary with hazard values to compute probability. - **kwargs: Keyword arguments. - - Returns: - OrderedDict: Limit state probabilities. - - """ - output = collections.OrderedDict() - index = 0 - - if len(self.fragility_curves) == 1: - limit_state = ['failure'] - elif len(self.fragility_curves) == 3: - limit_state = ['immocc', 'lifesfty', 'collprev'] - elif len(self.fragility_curves) == 4: - limit_state = ['ls-slight', 'ls-moderat', 'ls-extensi', 'ls-complet'] - else: - raise ValueError("We can only handle fragility curves with 1, 3 or 4 limit states!") - - for fragility_curve in self.fragility_curves: - probability = fragility_curve.calculate_limit_state_probability(hazard_values, - self.fragility_curve_parameters, - **kwargs) - output[limit_state[index]] = AnalysisUtil.update_precision(probability) # round to default digits - index += 1 - - return output - - def calculate_limit_state_refactored_w_conversion(self, hazard_values: dict = {}, - inventory_type: str = "building", - **kwargs): + def calculate_limit_state(self, hazard_values: dict = {}, + inventory_type: str = "building", + **kwargs): """WIP computation of limit state probabilities accounting for custom expressions. Args: @@ -207,36 +109,6 @@ def calculate_limit_state_refactored_w_conversion(self, hazard_values: dict = {} return output - @deprecated(version="0.9.0", reason="use calculate_custom_limit_state_w_conversion instead") - def calculate_custom_limit_state(self, variables: dict): - """Computes limit state probabilities. - - Args: - variables (dict): A dictionary of variables. - - Returns: - OrderedDict: Limit state probabilities for custom expression fragilities. - - """ - output = collections.OrderedDict() - index = 0 - - if len(self.fragility_curves) == 1: - limit_state = ['failure'] - elif len(self.fragility_curves) == 3: - limit_state = ['immocc', 'lifesfty', 'collprev'] - elif len(self.fragility_curves) == 4: - limit_state = ['ls-slight', 'ls-moderat', 'ls-extensi', 'ls-complet'] - else: - raise ValueError("We can only handle fragility curves with 1, 3 or 4 limit states!") - - for fragility_curve in self.fragility_curves: - probability = fragility_curve.compute_custom_limit_state_probability(variables) - output[limit_state[index]] = AnalysisUtil.update_precision(probability) # round to default digits - index += 1 - - return output - def calculate_limit_state_w_conversion(self, hazard, period: float = 0.0, std_dev: float = 0.0, inventory_type: str = "building", **kwargs): """Computes limit state probabilities. 
diff --git a/pyincore/models/parametricfragilitycurve.py b/pyincore/models/parametricfragilitycurve.py deleted file mode 100644 index cee25cd9c..000000000 --- a/pyincore/models/parametricfragilitycurve.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2019 University of Illinois and others. All rights reserved. -# -# This program and the accompanying materials are made available under the -# terms of the Mozilla Public License v2.0 which accompanies this distribution, -# and is available at https://www.mozilla.org/en-US/MPL/2.0/ - - -import math - -from pyincore.models.fragilitycurve import FragilityCurve - - -class ParametricFragilityCurve(FragilityCurve): - """ - class to represent parametric fragility curve - """ - - def __init__(self, curve_parameters): - # TODO: not sure if i need to define a class of parameters with "name", "unit", "coefficient" and - # "interceptTermDefault" as fixed fields; is it going to be over complicated? - self.parameters = curve_parameters['parameters'] - self.curve_type = curve_parameters['curveType'] - - super(ParametricFragilityCurve, self).__init__(curve_parameters) - - def calculate_limit_state_probability(self, hazard, period: float = 0.0, std_dev: float = 0.0, **kwargs): - """ - - Args: - hazard (float): A hazard value to compute probability for. - period (float): A building period default to 0. - std_dev (float): A standard deviation. - **kwargs: Keyword arguments. - - Returns: - float: A probability, pf (DS) = exp(X*theta')/(1+exp(X*theta')); - - Example: pf(DS) = EXP(1 * A0 + log(PGA) * A1 + A2*X2 + ...) / (1 + EXP(1 *A0 + log(PGA) * A1 + ...)) - - """ - probability = float(0.0) - - if self.curve_type.lower() == "logit": - cumulate_term = 0 # X*theta' - - for parameter_set in self.parameters: - name = parameter_set["name"].lower() - coefficient = parameter_set["coefficient"] - default = parameter_set["interceptTermDefault"] - if name == "demand": - cumulate_term += math.log(hazard) * coefficient - else: - if name not in kwargs.keys(): - cumulate_term += default * coefficient - else: - cumulate_term += kwargs[name] * coefficient - - probability = math.exp(cumulate_term) / (1 + math.exp(cumulate_term)) - else: - raise ValueError("Other parametric functions than Logit has not been implemented yet!") - - return probability diff --git a/pyincore/models/periodbuildingfragilitycurve.py b/pyincore/models/periodbuildingfragilitycurve.py deleted file mode 100644 index 679e4fdc9..000000000 --- a/pyincore/models/periodbuildingfragilitycurve.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (c) 2019 University of Illinois and others. All rights reserved. 
-# -# This program and the accompanying materials are made available under the -# terms of the Mozilla Public License v2.0 which accompanies this distribution, -# and is available at https://www.mozilla.org/en-US/MPL/2.0/ - - -import math - -from scipy.stats import norm - -from pyincore.models.fragilitycurve import FragilityCurve -from deprecated.sphinx import deprecated - - -class PeriodBuildingFragilityCurve(FragilityCurve): - """A class to represent period building fragility curve""" - - def __init__(self, curve_parameters): - self.period_eqn_type = curve_parameters['periodEqnType'] - self.period_param1 = curve_parameters['periodParam1'] - self.period_param2 = curve_parameters['periodParam2'] - self.period_param0 = curve_parameters['periodParam0'] - self.fs_param0 = curve_parameters['fsParam0'] - self.fs_param1 = curve_parameters['fsParam1'] - self.fs_param2 = curve_parameters['fsParam2'] - self.fs_param3 = curve_parameters['fsParam3'] - self.fs_param4 = curve_parameters['fsParam4'] - self.fs_param5 = curve_parameters['fsParam5'] - - super(PeriodBuildingFragilityCurve, self).__init__(curve_parameters) - - def get_building_period(self, num_stories): - """Get building period from the fragility curve. - - Args: - num_stories (int): Number of building stories. - - Returns: - float: Building period. - - """ - period = 0.0 - period_equation_type = self.period_eqn_type - if period_equation_type == 1: - period = self.period_param0 - elif period_equation_type == 2: - period = self.period_param0 * num_stories - elif period_equation_type == 3: - period = self.period_param1 * math.pow( - self.period_param0 * num_stories, - self.period_param2) - - return period - - def calculate_limit_state_probability(self, hazard, period: float = 0.0, std_dev: float = 0.0, **kwargs): - """Computes limit state probabilities. - - Args: - hazard (float): A hazard value to compute probability for. - period (float): A building period default to 0. - std_dev (float): A standard deviation. - **kwargs: Keyword arguments. - - Returns: - float: A limit state probability. - - """ - - probability = float(0.0) - - if hazard > 0.0: - # If no period is provided, assumption is 0 since the user should check their - # data for missing parameters (e.g. number of building stories). - probability = PeriodBuildingFragilityCurve.compute_period_building_fragility_value( - hazard, period, self.fs_param0, self.fs_param1, self.fs_param2, self.fs_param3, self.fs_param4, - self.fs_param5) - - return probability - - @staticmethod - def compute_period_building_fragility_value(hazard, period, a11_param, a12_param, a13_param, a14_param, - a21_param, a22_param): - - # Assumption from Ergo BuildingLowPeriodSolver - cutoff_period = 0.87 - - probability = 0.0 - if period < cutoff_period: - multiplier = cutoff_period - period - surface_eq = (math.log( - hazard) - (cutoff_period * a12_param + a11_param)) / ( - a13_param + a14_param * cutoff_period) - probability = norm.cdf(surface_eq + multiplier * ( - math.log(hazard) - a21_param) / a22_param) - else: - probability = norm.cdf( - (math.log(hazard) - (a11_param + a12_param * period)) / ( - a13_param + a14_param * period)) - - return probability diff --git a/pyincore/models/periodstandardfragilitycurve.py b/pyincore/models/periodstandardfragilitycurve.py deleted file mode 100644 index f78253662..000000000 --- a/pyincore/models/periodstandardfragilitycurve.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) 2019 University of Illinois and others. All rights reserved. 
-# -# This program and the accompanying materials are made available under the -# terms of the Mozilla Public License v2.0 which accompanies this distribution, -# and is available at https://www.mozilla.org/en-US/MPL/2.0/ - - -import math - -from scipy.stats import norm - -from pyincore.models.fragilitycurve import FragilityCurve -from deprecated.sphinx import deprecated - - -class PeriodStandardFragilityCurve(FragilityCurve): - """A class to represent period standard fragility curve""" - - def __init__(self, curve_parameters): - self.alpha = curve_parameters['alpha'] - self.beta = curve_parameters['beta'] - self.alpha_type = curve_parameters['alphaType'] - self.curve_type = curve_parameters['curveType'] - self.period_param2 = curve_parameters['periodParam2'] - self.period_param1 = curve_parameters['periodParam1'] - self.period_param0 = curve_parameters['periodParam0'] - self.period_eqn_type = curve_parameters['periodEqnType'] - - super(PeriodStandardFragilityCurve, self).__init__(curve_parameters) - - def get_building_period(self, num_stories): - """Get building period from the fragility curve. - - Args: - num_stories (int): Number of building stories. - - Returns: - float: Building period. - - """ - period = 0.0 - period_equation_type = self.period_eqn_type - if period_equation_type == 1: - period = self.period_param0 - elif period_equation_type == 2: - period = self.period_param0 * num_stories - elif period_equation_type == 3: - period = self.period_param1 * math.pow( - self.period_param0 * num_stories, - self.period_param2) - - return period - - def calculate_limit_state_probability(self, hazard, period: float = 0.0, std_dev: float = 0.0, **kwargs): - """Computes limit state probabilities. - - Args: - hazard (float): A hazard value to compute probability for. - period (float): A building period default to 0. - std_dev (float): A standard deviation. - **kwargs: Keyword arguments. - - Returns: - float: A limit state probability. - - """ - probability = float(0.0) - - if hazard > 0.0: - alpha = float(self.alpha) - beta = math.sqrt(math.pow(self.beta, 2) + math.pow(std_dev, 2)) - - if self.alpha_type == 'median': - sp = (math.log(hazard) - math.log(alpha)) / beta - probability = norm.cdf(sp) - elif self.alpha_type == "lambda": - x = (math.log(hazard) - alpha) / beta - probability = norm.cdf(x) - - return probability - - @deprecated(version="0.9.7", reason="This method is already incorporated in refactored fragility curves and will " - "be deprecated") - def adjust_fragility_for_liquefaction(self, liquefaction: str): - """Adjusts fragility curve object by input parameter liquefaction. - - Args: - liquefaction (str): Liquefaction type. 
- - """ - liquefaction_unified = str(liquefaction).upper() - if liquefaction_unified == "U": - multiplier = 0.85 - elif liquefaction_unified == "Y": - multiplier = 0.65 - else: - multiplier = 1.0 - - self.alpha = self.alpha * multiplier - self.beta = self.beta diff --git a/pyincore/models/repaircurveset.py b/pyincore/models/repaircurveset.py index c5f11ff2b..0e88a5841 100644 --- a/pyincore/models/repaircurveset.py +++ b/pyincore/models/repaircurveset.py @@ -5,7 +5,7 @@ # and is available at https://www.mozilla.org/en-US/MPL/2.0/ import json -from pyincore.models.standardfragilitycurve import StandardFragilityCurve +from pyincore.models.standardrepaircurve import StandardRepairCurve class RepairCurveSet: @@ -35,9 +35,7 @@ def __init__(self, metadata): if 'repairCurves' in metadata.keys(): for repair_curve in metadata["repairCurves"]: if repair_curve['className'] == 'StandardRepairCurve': - # Using StandardFragilityCurve for now instead of creating a StandardRepairCurve because it will be - # deprecated and repair curves will be using the expression based format the in near future. - self.repair_curves.append(StandardFragilityCurve(repair_curve)) + self.repair_curves.append(StandardRepairCurve(repair_curve)) else: # TODO make a custom repair curve class that accept other formats self.repair_curves.append(repair_curve) diff --git a/pyincore/models/standardfragilitycurve.py b/pyincore/models/standardrepaircurve.py similarity index 61% rename from pyincore/models/standardfragilitycurve.py rename to pyincore/models/standardrepaircurve.py index 05acb66cb..98a52e612 100644 --- a/pyincore/models/standardfragilitycurve.py +++ b/pyincore/models/standardrepaircurve.py @@ -9,21 +9,18 @@ from scipy.stats import norm -from pyincore.models.fragilitycurve import FragilityCurve -from deprecated.sphinx import deprecated - -class StandardFragilityCurve(FragilityCurve): - """A class to represent standard fragility curve""" +# TODO: This will be deprecated when repair curves are migrated to expression based format. +class StandardRepairCurve: + """A class to represent standard Repair curve.""" def __init__(self, curve_parameters): + self.description = curve_parameters['description'] self.alpha = curve_parameters['alpha'] self.beta = curve_parameters['beta'] self.alpha_type = curve_parameters['alphaType'] self.curve_type = curve_parameters['curveType'] - super(StandardFragilityCurve, self).__init__(curve_parameters) - def calculate_limit_state_probability(self, hazard, period: float = 0.0, std_dev: float = 0.0, **kwargs): """Computes limit state probabilities. @@ -52,22 +49,3 @@ def calculate_limit_state_probability(self, hazard, period: float = 0.0, std_dev return probability - @deprecated(version="0.9.7", reason="This method is already incorporated in refactored fragility curves and will " - "be deprecated") - def adjust_fragility_for_liquefaction(self, liquefaction: str): - """Adjusts fragility curve object by input parameter liquefaction. - - Args: - liquefaction (str): Liquefaction type. 
- - """ - liquefaction_unified = str(liquefaction).upper() - if liquefaction_unified == "U": - multiplier = 0.85 - elif liquefaction_unified == "Y": - multiplier = 0.65 - else: - multiplier = 1.0 - - self.alpha = self.alpha * multiplier - self.beta = self.beta diff --git a/pyincore/utils/analysisutil.py b/pyincore/utils/analysisutil.py index 364351391..d6c60d2bd 100644 --- a/pyincore/utils/analysisutil.py +++ b/pyincore/utils/analysisutil.py @@ -295,20 +295,23 @@ def create_gdocstr_from_spec(specs): rets = "" for dataset in specs['input_datasets']: - isOpt = "" + is_opt = "" if not dataset['required']: - isOpt = ", " + "optional" + is_opt = ", " + "optional" - args = args + dataset['id'] + "(str" + isOpt + ") : " + dataset['description'] + ". " \ - + AnalysisUtil.get_custom_types_str(dataset['type']) + "\n\t" + args = \ + args + dataset['id'] + "(str" + is_opt + ") : " \ + + dataset['description'] + ". " \ + + AnalysisUtil.get_custom_types_str(dataset['type']) + "\n\t" for param in specs['input_parameters']: - isOpt = "" + is_opt = "" if not param['required']: - isOpt = ", " + "optional" + is_opt = ", " + "optional" - args = args + param['id'] + "(" \ - + AnalysisUtil.get_type_str(param['type']) + isOpt + ") : " + param['description'] + "\n\t" + args = \ + args + param['id'] + "(" + AnalysisUtil.get_type_str(param['type']) + is_opt + ") : " \ + + param['description'] + "\n\t" for dataset in specs['output_datasets']: rets = rets + dataset['id'] + ": " \ diff --git a/pyincore/utils/cgeoutputprocess.py b/pyincore/utils/cgeoutputprocess.py index a5d18ec2e..35dafe89c 100644 --- a/pyincore/utils/cgeoutputprocess.py +++ b/pyincore/utils/cgeoutputprocess.py @@ -18,6 +18,7 @@ def get_cge_household_count(household_count, household_count_path=None, filename { "beforeEvent": {"HH1": 3611, "HH2": 5997.0, "HH3": 7544.1, "HH4": 2394.1, "HH5": 793.0}, "afterEvent": {"HH1": 3588, "HH2": 5929.8, "HH3": 7324.1, "HH4": 2207.5, "HH5": 766.4}, + "%_change": {"HH1": -0.6369, "HH2": -1.1, "HH3": -2.92, "HH4": -7.8, "HH5": -3.35} } Args: @@ -42,11 +43,16 @@ def get_cge_household_count(household_count, household_count_path=None, filename before_event = {} after_event = {} + pct_change = {} for i in range(len(income_categories)): before_event[income_categories[i]] = before_values[i] after_event[income_categories[i]] = after_values[i] + if before_values[i]: + pct_change[income_categories[i]] = 100 * ((after_values[i] - before_values[i]) / abs(before_values[i])) + else: + pct_change[income_categories[i]] = None - cge_total_household_count = {"beforeEvent": before_event, "afterEvent": after_event} + cge_total_household_count = {"beforeEvent": before_event, "afterEvent": after_event, "%_change": pct_change} if filename_json: with open(filename_json, "w") as outfile: @@ -61,6 +67,7 @@ def get_cge_gross_income(gross_income, gross_income_path=None, filename_json=Non { "beforeEvent": {"HH1": 13, "HH2": 153.5, "HH3": 453.1, "HH4": 438.9, "HH5": 125.0}, "afterEvent": {"HH1": 13, "HH2": 152.5, "HH3": 445.6, "HH4": 432.9, "HH5": 124.5}, + "%_change": {"HH1": -0, "HH2": -x.x, "HH3": -x.x, "HH4": -x.x, "HH5": -x.x} } Args: @@ -85,11 +92,16 @@ def get_cge_gross_income(gross_income, gross_income_path=None, filename_json=Non before_event = {} after_event = {} + pct_change = {} for i in range(len(income_categories)): before_event[income_categories[i]] = before_values[i] after_event[income_categories[i]] = after_values[i] + if before_values[i]: + pct_change[income_categories[i]] = 100 * ((after_values[i] - before_values[i]) / 
abs(before_values[i])) + else: + pct_change[income_categories[i]] = None - cge_total_household_income = {"beforeEvent": before_event, "afterEvent": after_event} + cge_total_household_income = {"beforeEvent": before_event, "afterEvent": after_event, "%_change": pct_change} if filename_json: with open(filename_json, "w") as outfile: @@ -109,7 +121,8 @@ def get_cge_employment(pre_demand, post_demand, "Trade": 8876, "Other": 23767 }, - "beforeEvent": {"Goods": 6744, "Trade": 8940, "Other": 24147} + "beforeEvent": {"Goods": 6744, "Trade": 8940, "Other": 24147}, + "%_change": {"Goods": -0, "Trade": -x.x, "Other": -x.x} } Args: @@ -145,11 +158,16 @@ def get_cge_employment(pre_demand, post_demand, before_event = {} after_event = {} + pct_change = {} for i in range(len(demand_categories)): before_event[demand_categories[i]] = before_values[i] after_event[demand_categories[i]] = after_values[i] + if before_values[i]: + pct_change[demand_categories[i]] = 100 * ((after_values[i] - before_values[i]) / abs(before_values[i])) + else: + pct_change[demand_categories[i]] = None - cge_employment = {"beforeEvent": before_event, "afterEvent": after_event} + cge_employment = {"beforeEvent": before_event, "afterEvent": after_event, "%_change": pct_change} if filename_json: with open(filename_json, "w") as outfile: @@ -165,7 +183,9 @@ def get_cge_domestic_supply(domestic_supply, domestic_supply_path=None, filename "afterEvent": {"Goods": 662.3, "Trade": 209.0, "Other": 254.1, "HS1": 22.0, "HS2": 1337.1, "HS3": 466.2}, "beforeEvent": {"Goods": 662.3, "Trade": 209.0, "Other": 254.1, - "HS1": 22.0, "HS2": 1337.1, "HS3": 466.2} + "HS1": 22.0, "HS2": 1337.1, "HS3": 466.2}, + "%_change": {"Goods": -1.1, "Trade": -1.1, "Other": -1.1, + "HS1": -1.1, "HS2": -1.1, "HS3": -1.1} } Args: @@ -189,11 +209,16 @@ def get_cge_domestic_supply(domestic_supply, domestic_supply_path=None, filename before_event = {} after_event = {} + pct_change = {} for i in range(len(supply_categories)): before_event[supply_categories[i]] = before_values[i] after_event[supply_categories[i]] = after_values[i] + if before_values[i]: + pct_change[supply_categories[i]] = 100 * ((after_values[i] - before_values[i]) / abs(before_values[i])) + else: + pct_change[supply_categories[i]] = None - cge_domestic_supply = {"beforeEvent": before_event, "afterEvent": after_event} + cge_domestic_supply = {"beforeEvent": before_event, "afterEvent": after_event, "%_change": pct_change} if filename_json: with open(filename_json, "w") as outfile: diff --git a/pyincore/utils/popdisloutputprocess.py b/pyincore/utils/popdisloutputprocess.py new file mode 100644 index 000000000..3d32d92e2 --- /dev/null +++ b/pyincore/utils/popdisloutputprocess.py @@ -0,0 +1,523 @@ +# Copyright (c) 2021 University of Illinois and others. All rights reserved. +# +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + +import json +import pandas as pd +import geopandas as gpd +from shapely import wkt + + +class PopDislOutputProcess: + """This class converts csv results outputs of Population dislocation analysis to json format and shapefiles. + + Args: + pop_disl_result (obj): IN-CORE dataset for Joplin Population Dislocation (PD) results. + pop_disl_result_path (obj): A fallback for the case that Joplin PD object is not provided. + For example a user wants to directly pass in csv files, a path to PD results. 
+ filter_on (bool): A flag to filter the data; when True (default), only buildings with a guid, i.e. the Joplin inventory, are counted + + """ + HUPD_CATEGORIES = ["household_characteristics", + "household_dislocated", + "total_households", + "%_household_dislocated", + "population_dislocated", + "total_population", + "%_population_dislocated" + ] + + def __init__(self, pop_disl_result, pop_disl_result_path=None, filter_on=True): + if pop_disl_result_path: + pd_result = pd.read_csv(pop_disl_result_path, low_memory=False) + else: + pd_result = pop_disl_result.get_dataframe_from_csv(low_memory=False) + pd_result["geometry"] = pd_result["geometry"].apply(wkt.loads) + + pd_result_shp = None + # keep only inventory with guid; filter for Joplin since only Joplin inventory has guids + if filter_on: + pd_result = pd_result[(pd_result["guid"].notnull()) & + (pd_result["numprec"].notnull())] + # only keep guid and dislocated + pd_result_shp = pd_result[(pd_result["dislocated"]) & + (pd_result["guid"].notnull()) & + (pd_result["numprec"].notnull())] + self.pop_disl_result = pd_result + self.pop_disl_result_shp = pd_result_shp + + def get_heatmap_shp(self, filename="pop-disl-numprec.shp"): + """ Convert and filter population dislocation output to a shapefile that contains only the guid and numprec columns + + Args: + filename (str): Path and name to save shapefile output file in. E.g "heatmap.shp" + + Returns: + str: Full path and filename of the shapefile + + """ + df = self.pop_disl_result_shp + + # save as shapefile + gdf = gpd.GeoDataFrame(df, crs='epsg:4326') + gdf = gdf[["guid", "numprec", "geometry"]] + gdf.to_file(filename) + + return filename + + def pd_by_race(self, filename_json=None): + """ Calculate race results from the output files of the Joplin Population Dislocation analysis + and convert the results to json format. + [ + {"household_characteristics": "Not Hispanic/White", + "household_dislocated": 1521, + "total_households": 18507, + "%_household_dislocated": 7.3, + "population_dislocated", + "total_population", + "%_population_dislocated" + },{"household_characteristics": "Not Hispanic/Black",..,..},{},{}, + {"No Race or Ethnicity Data"},{"Total"} + ] + + Args: + filename_json (str): Path and name to save json output file in. E.g "pd_race_count.json" + + Returns: + obj: PD total count by race. A JSON of the hua and population dislocation race results by category.
+ + """ + # Race categories + # The numbering follows the Community description notebook + # 0 - Vacant HU No Race Ethnicity Data, 1 - Not Hispanic/White, 2 - Not Hispanic/Black + # 3 - Not Hispanic/Other race, 4 - Hispanic, 5 - No Race or Ethnicity Data + race_categories = ["Not Hispanic/White", + "Not Hispanic/Black", + "Not Hispanic/Other Race", + "Hispanic", + "No Race or Ethnicity Data", + "Total"] + + huapd = self.pop_disl_result + # Allocated by race and ethnicity + huapd["hua_re"] = "0" + huapd.loc[(huapd["race"] == 1) & (huapd["hispan"] == 0), "hua_re"] = "1" + huapd.loc[(huapd["race"] == 2) & (huapd["hispan"] == 0), "hua_re"] = "2" + huapd.loc[(huapd["race"].isin([3, 4, 5, 6, 7])) & (huapd["hispan"] == 0), "hua_re"] = "3" + huapd.loc[(huapd["hispan"] == 1), "hua_re"] = "4" + huapd.loc[(huapd["gqtype"] >= 1), "hua_re"] = "5" + hua_vals = huapd["hua_re"].value_counts() + hua_tot = [] + for i in range(len(race_categories)): + try: + hua_tot.append(int(hua_vals[str(i)])) + except Exception: + hua_tot.append(0) + hua_tot.append(int(sum(hua_tot[1:]))) + + pop_tot = [] + for i in range(len(race_categories)): + pop_tot.append(int(huapd["numprec"].where(huapd["hua_re"] == str(i)).sum())) + pop_tot.append(int(sum(pop_tot[1:]))) + + # Dislocated by race and ethnicity + huapd["hud_re"] = "0" + huapd.loc[(huapd["race"] == 1) & (huapd["hispan"] == 0) & huapd["dislocated"], "hud_re"] = "1" + huapd.loc[(huapd["race"] == 2) & (huapd["hispan"] == 0) & huapd["dislocated"], "hud_re"] = "2" + huapd.loc[(huapd["race"].isin([3, 4, 5, 6, 7])) & (huapd["hispan"] == 0) & huapd["dislocated"], "hud_re"] = "3" + huapd.loc[(huapd["hispan"] == 1) & huapd["dislocated"], "hud_re"] = "4" + huapd.loc[(huapd["gqtype"] >= 1) & huapd["dislocated"], "hud_re"] = "5" + hud_vals = huapd["hud_re"].value_counts() + hua_disl = [] + for i in range(len(race_categories)): + try: + hua_disl.append(int(hud_vals[str(i)])) + except Exception: + hua_disl.append(0) + hua_disl.append(int(sum(hua_disl[1:]))) + + pd_disl = [] + for i in range(len(race_categories)): + pd_disl.append(int(huapd["numprec"].where(huapd["hud_re"] == str(i)).sum())) + pd_disl.append(int(sum(pd_disl[1:]))) + + pd_by_race_json = [] + for i in range(len(race_categories)): + huapd_race = {} + huapd_race[self.HUPD_CATEGORIES[0]] = race_categories[i] + huapd_race[self.HUPD_CATEGORIES[1]] = hua_disl[i + 1] + huapd_race[self.HUPD_CATEGORIES[2]] = hua_tot[i + 1] + if hua_tot[i + 1]: + huapd_race[self.HUPD_CATEGORIES[3]] = 100 * (hua_disl[i + 1] / hua_tot[i + 1]) + else: + huapd_race[self.HUPD_CATEGORIES[3]] = None + huapd_race[self.HUPD_CATEGORIES[4]] = pd_disl[i + 1] + huapd_race[self.HUPD_CATEGORIES[5]] = pop_tot[i + 1] + if pop_tot[i + 1]: + huapd_race[self.HUPD_CATEGORIES[6]] = 100 * (pd_disl[i + 1] / pop_tot[i + 1]) + else: + huapd_race[self.HUPD_CATEGORIES[6]] = None + pd_by_race_json.append(huapd_race) + # print(pd_by_race_json) + + if filename_json: + with open(filename_json, "w") as outfile: + json.dump(pd_by_race_json, outfile) + # Serializing json + return json.dumps(pd_by_race_json) + + def pd_by_income(self, filename_json=None): + """ Calculate income results from the output files of the Joplin Population Dislocation analysis + and convert the results to json format. 
+ [ + {"household_characteristics": "HH1 (less than $15,000)", + "household_dislocated": 311, + "total_households": 3252, + "%_household_dislocated": 7.3, + "population_dislocated": 311, + "total_population": 3252, + "%_population_dislocated" + }, + {"HH2 ($15,000 to $35,000)",..,..,..,..},{},{},{},{}, + {"Unknown",..,..,..,..} + ] + + Args: + filename_json (str): Path and name to save json output file in. E.g "pd_income_count.json" + + Returns: + obj: PD total count by income. A JSON of the hua and population dislocation income results by category. + + """ + income_categories = ["HH1 (less than $15,000)", + "HH2 ($15,000 to $35,000)", + "HH3 ($35,000 to $70,000)", + "HH4 ($70,000 to $120,000)", + "HH5 (More than $120,000)", + "Unknown", + "Total"] + + huapd = self.pop_disl_result + # Allocated by income + hua_tot = [] + for i in range(1, 6): + alloc_inc = (huapd["hhinc"] == i).sum() + hua_tot.append(int(alloc_inc)) + hua_tot.append(int(pd.isna(huapd["hhinc"]).sum())) + hua_tot.append(int(sum(hua_tot))) + + pop_tot = [] + for i in range(1, 6): + alloc = huapd["numprec"].where(huapd["hhinc"] == i).sum() + pop_tot.append(int(alloc)) + pop_tot.append(int(huapd["numprec"].where(pd.isna(huapd["hhinc"])).sum())) + pop_tot.append(int(sum(pop_tot))) + + # Dislocated by income + hua_disl = [] + for i in range(1, 6): + disl = huapd.loc[(huapd["hhinc"] == i) & huapd["dislocated"], ["dislocated"]].sum() + hua_disl.append(int(disl)) + disl_unknown = huapd.loc[pd.isna(huapd["hhinc"]) & huapd["dislocated"], ["dislocated"]].sum() + hua_disl.append(int(disl_unknown)) + hua_disl.append(int(sum(hua_disl))) + + pd_disl = [] + for i in range(1, 6): + disl = huapd.loc[(huapd["hhinc"] == i) & huapd["dislocated"], ["numprec"]].sum() + pd_disl.append(int(disl)) + disl_unknown = huapd.loc[pd.isna(huapd["hhinc"]) & huapd["dislocated"], ["numprec"]].sum() + pd_disl.append(int(disl_unknown)) + pd_disl.append(int(sum(pd_disl))) + + pd_by_income_json = [] + for i in range(len(income_categories)): + huapd_income = {} + huapd_income[self.HUPD_CATEGORIES[0]] = income_categories[i] + huapd_income[self.HUPD_CATEGORIES[1]] = hua_disl[i] + huapd_income[self.HUPD_CATEGORIES[2]] = hua_tot[i] + if hua_tot[i]: + huapd_income[self.HUPD_CATEGORIES[3]] = 100 * (hua_disl[i] / hua_tot[i]) + else: + huapd_income[self.HUPD_CATEGORIES[3]] = None + huapd_income[self.HUPD_CATEGORIES[4]] = pd_disl[i] + huapd_income[self.HUPD_CATEGORIES[5]] = pop_tot[i] + if pop_tot[i]: + huapd_income[self.HUPD_CATEGORIES[6]] = 100 * (pd_disl[i] / pop_tot[i]) + else: + huapd_income[self.HUPD_CATEGORIES[6]] = None + pd_by_income_json.append(huapd_income) + # print(pd_by_income_json) + + if filename_json: + with open(filename_json, "w") as outfile: + json.dump(pd_by_income_json, outfile) + # Serializing json + return json.dumps(pd_by_income_json) + + def pd_by_tenure(self, filename_json=None): + """ Calculate tenure results from the output files of the Joplin Population Dislocation analysis + and convert the results to json format. + [ + {"household_characteristics": "Owner occupied", + "household_dislocated": 1018, + "total_households": 11344, + "%_household_dislocated": 7.3, + "population_dislocated": 1018, + "total_population": 11344, + "%_population_dislocated" + }, + {"household_characteristics": "Renter occupied",..,..,..,..},{},{},{},{},{}, + {"total",..,..,..,..} + ] + + Args: + filename_json (str): Path and name to save json output file in. E.g "pd_tenure_count.json" + + Returns: + obj: PD total count by tenure.
A JSON of the hua and population dislocation tenure results by category. + + """ + # Tenure categories + # The numbering follows the Community description notebook + # 0 - Vacant HU No Tenure Data, 1 - Owner occupied, 2 - Renter occupied, + # 3 - Nursing facilities, 4 - Other group quarters, 5 - Vacant for rent + # 6 - Vacant for sale, 7 - Vacant other + tenure_categories = ["Owner occupied", + "Renter occupied", + "Nursing facilities", + "Other group quarters", + "Vacant for rent", + "Vacant for sale", + "Vacant other", + "Total"] + + huapd = self.pop_disl_result + # Allocated by tenure + huapd["hua_tnr"] = "0" + huapd.loc[huapd["ownershp"] == 1.0, "hua_tnr"] = "1" + huapd.loc[huapd["ownershp"] == 2.0, "hua_tnr"] = "2" + huapd.loc[huapd["gqtype"] == 3, "hua_tnr"] = "3" + huapd.loc[huapd["gqtype"].isin([1, 2, 4, 5, 6, 7, 8]), "hua_tnr"] = "4" + huapd.loc[huapd["vacancy"].isin([1, 2]), "hua_tnr"] = "5" + huapd.loc[huapd["vacancy"].isin([3, 4]), "hua_tnr"] = "6" + huapd.loc[huapd["vacancy"].isin([5, 6, 7]), "hua_tnr"] = "7" + hua_vals = huapd["hua_tnr"].value_counts() + hua_tot = [] + for i in range(len(tenure_categories)): + try: + hua_tot.append(int(hua_vals[str(i)])) + except Exception: + hua_tot.append(0) + hua_tot.append(int(sum(hua_tot[1:]))) + + pop_tot = [] + for i in range(len(tenure_categories)): + pop_tot.append(int(huapd["numprec"].where(huapd["hua_tnr"] == str(i)).sum())) + pop_tot.append(int(sum(pop_tot[1:]))) + + # Dislocated by tenure + huapd["hud_tnr"] = "0" + huapd.loc[(huapd["ownershp"] == 1.0) & huapd["dislocated"], "hud_tnr"] = "1" + huapd.loc[(huapd["ownershp"] == 2.0) & huapd["dislocated"], "hud_tnr"] = "2" + huapd.loc[(huapd["gqtype"] == 3) & huapd["dislocated"], "hud_tnr"] = "3" + huapd.loc[huapd["gqtype"].isin([1, 2, 4, 5, 6, 7, 8]) & huapd["dislocated"], "hud_tnr"] = "4" + huapd.loc[huapd["vacancy"].isin([1, 2]) & huapd["dislocated"], "hud_tnr"] = "5" + huapd.loc[huapd["vacancy"].isin([3, 4]) & huapd["dislocated"], "hud_tnr"] = "6" + huapd.loc[huapd["vacancy"].isin([5, 6, 7]) & huapd["dislocated"], "hud_tnr"] = "7" + hud_vals = huapd["hud_tnr"].value_counts() + hua_disl = [] + for i in range(len(tenure_categories)): + try: + hua_disl.append(int(hud_vals[str(i)])) + except Exception: + hua_disl.append(0) + hua_disl.append(int(sum(hua_disl[1:]))) + + pd_disl = [] + for i in range(len(tenure_categories)): + pd_disl.append(int(huapd["numprec"].where(huapd["hud_tnr"] == str(i)).sum())) + pd_disl.append(int(sum(pd_disl[1:]))) + + pd_by_tenure_json = [] + for i in range(len(tenure_categories)): + huapd_tenure = {} + huapd_tenure[self.HUPD_CATEGORIES[0]] = tenure_categories[i] + huapd_tenure[self.HUPD_CATEGORIES[1]] = hua_disl[i + 1] + huapd_tenure[self.HUPD_CATEGORIES[2]] = hua_tot[i + 1] + if hua_tot[i + 1]: + huapd_tenure[self.HUPD_CATEGORIES[3]] = 100 * (hua_disl[i + 1] / hua_tot[i + 1]) + else: + huapd_tenure[self.HUPD_CATEGORIES[3]] = None + huapd_tenure[self.HUPD_CATEGORIES[4]] = pd_disl[i + 1] + huapd_tenure[self.HUPD_CATEGORIES[5]] = pop_tot[i + 1] + if pop_tot[i + 1]: + huapd_tenure[self.HUPD_CATEGORIES[6]] = 100 * (pd_disl[i + 1] / pop_tot[i + 1]) + else: + huapd_tenure[self.HUPD_CATEGORIES[6]] = None + pd_by_tenure_json.append(huapd_tenure) + # print(pd_by_tenure_json) + + if filename_json: + with open(filename_json, "w") as outfile: + json.dump(pd_by_tenure_json, outfile) + # Serializing json + return json.dumps(pd_by_tenure_json) + + def pd_by_housing(self, filename_json=None): + """ Calculate housing results from the output files of the Joplin Population
Dislocation analysis + using huestimate column (huestimate = 1 is single family, huestimate > 1 means multi family house) + and convert the results to json format. + [ + {"household_characteristics": "Single Family", + "household_dislocated": 1162, + "total_households": 837, + "%_household_dislocated": 7.3, + "population_dislocated": 1162, + "total_population": 837, + "%_population_dislocated" + },{},{"Total",..,..,..,..} + ] + + Args: + filename_json (str): Path and name to save json output file in. E.g "pd_housing_count.json" + + Returns: + obj: PD total count by housing. A JSON of the hua and population dislocation housing results by category. + + """ + # Household categories + # 0 - Vacant HU No Tenure Data, 1 - Single Family, 2 - Multi Family + household_categories = ["Single Family", + "Multi Family", + "Total"] + + huapd = self.pop_disl_result + # Allocated by housing + huapd["hua_house"] = "0" + huapd.loc[(huapd["huestimate"] == 1.0), "hua_house"] = "1" + huapd.loc[(huapd["huestimate"] > 1.0), "hua_house"] = "2" + hua_vals = huapd["hua_house"].value_counts() + hua_tot = [] + for i in range(len(household_categories)): + try: + hua_tot.append(int(hua_vals[str(i)])) + except Exception: + hua_tot.append(0) + hua_tot.append(int(sum(hua_tot[1:]))) + + pop_tot = [] + for i in range(len(household_categories)): + pop_tot.append(int(huapd["numprec"].where(huapd["hua_house"] == str(i)).sum())) + pop_tot.append(int(sum(pop_tot[1:]))) + + # Dislocated by household + huapd["hud_house"] = "0" + huapd.loc[(huapd["huestimate"] == 1.0) & huapd["dislocated"], "hud_house"] = "1" + huapd.loc[(huapd["huestimate"] > 1.0) & huapd["dislocated"], "hud_house"] = "2" + hud_vals = huapd["hud_house"].value_counts() + hua_disl = [] + for i in range(len(household_categories)): + try: + hua_disl.append(int(hud_vals[str(i)])) + except Exception: + hua_disl.append(0) + hua_disl.append(int(sum(hua_disl[1:]))) + + pd_disl = [] + for i in range(len(household_categories)): + pd_disl.append(int(huapd["numprec"].where(huapd["hud_house"] == str(i)).sum())) + pd_disl.append(int(sum(pd_disl[1:]))) + + pd_by_housing_json = [] + for i in range(len(household_categories)): + huapd_household = {} + huapd_household[self.HUPD_CATEGORIES[0]] = household_categories[i] + huapd_household[self.HUPD_CATEGORIES[1]] = hua_disl[i + 1] + huapd_household[self.HUPD_CATEGORIES[2]] = hua_tot[i + 1] + if hua_tot[i + 1]: + huapd_household[self.HUPD_CATEGORIES[3]] = 100 * (hua_disl[i + 1] / hua_tot[i + 1]) + else: + huapd_household[self.HUPD_CATEGORIES[3]] = None + huapd_household[self.HUPD_CATEGORIES[4]] = pd_disl[i + 1] + huapd_household[self.HUPD_CATEGORIES[5]] = pop_tot[i + 1] + if pop_tot[i + 1]: + huapd_household[self.HUPD_CATEGORIES[6]] = 100 * (pd_disl[i + 1] / pop_tot[i + 1]) + else: + huapd_household[self.HUPD_CATEGORIES[6]] = None + pd_by_housing_json.append(huapd_household) + # print(pd_by_housing_json) + + if filename_json: + with open(filename_json, "w") as outfile: + json.dump(pd_by_housing_json, outfile) + return json.dumps(pd_by_housing_json) + + def pd_total(self, filename_json=None): + """ Calculate total results from the output files of the Joplin Population Dislocation analysis + and convert the results to json format. + { "household_dislocation_in_total": { + "dislocated": { + "households": 1999, + "%_of_households": 8.5 + }, "not_dislocated": {}, "total": {} + },"population_dislocation_in_total": {"dislocated": {},"not_dislocated": {}, "total": {}} + } + + Args: + filename_json (str): Path and name to save json output file in.
E.g "pd_total_count.json" + + Returns: + obj: PD total count. A JSON of the hua and population dislocation total results by category. + + """ + # Dislocated by race and ethnicity + hud = self.pop_disl_result + hud_vals = hud["dislocated"].value_counts() + hua_disl = [int(hud_vals[False]), int(hud_vals[True])] + + pd_disl = [] + pd_disl.append(int(hud["numprec"].where(hud["dislocated"] == 0).sum())) + pd_disl.append(int(hud["numprec"].where(hud["dislocated"] == 1).sum())) + + hua_tot = sum(hua_disl) + pop_tot = sum(pd_disl) + + hua_disl_tot = {} + if hua_tot: + hua_disl_tot["dislocated"] = {"households": hua_disl[1], + "%_of_households": 100 * (hua_disl[1]/hua_tot)} + hua_disl_tot["not_dislocated"] = {"households": hua_tot - hua_disl[1], + "%_of_households": 100 * ((hua_tot - hua_disl[1])/hua_tot)} + hua_disl_tot["total"] = {"households": hua_tot, "%_of_households": 100} + else: + hua_disl_tot["dislocated"] = {"households": None, + "%_of_households": None} + hua_disl_tot["not_dislocated"] = {"households": None, + "%_of_households": None} + hua_disl_tot["total"] = {"households": None, "%_of_households": None} + + pop_disl_tot = {} + if pop_tot: + pop_disl_tot["dislocated"] = {"population": pd_disl[1], + "%_of_population": 100 * (pd_disl[1]/pop_tot)} + pop_disl_tot["not_dislocated"] = {"population": pop_tot - pd_disl[1], + "%_of_population": 100 * ((pop_tot - pd_disl[1])/pop_tot)} + pop_disl_tot["total"] = {"population": pop_tot, "%_of_population": 100} + else: + pop_disl_tot["dislocated"] = {"population": None, + "%_of_population": None} + pop_disl_tot["not_dislocated"] = {"population": None, + "%_of_population": None} + pop_disl_tot["total"] = {"population": None, "%_of_population": None} + + pd_total_json = {"household_dislocation_in_total": hua_disl_tot, + "population_dislocation_in_total": pop_disl_tot} + # print(pd_total_json) + + if filename_json: + with open(filename_json, "w") as outfile: + json.dump(pd_total_json, outfile) + return json.dumps(pd_total_json) diff --git a/recipes/meta.yaml b/recipes/meta.yaml new file mode 100644 index 000000000..ac07cf5ef --- /dev/null +++ b/recipes/meta.yaml @@ -0,0 +1,81 @@ +{% set name = "pyincore" %} +{% set version = "1.0.0" %} + +package: + name: {{ name|lower }} + version: {{ version }} + +source: + path: /path/to/pyincore + # PR: https://opensource.ncsa.illinois.edu/bitbucket/projects/INCORE1/repos/pyincore/pull-requests/12/overview + +build: + # If this is a new build for the same version, increment the build + # number. If you do not include this key, it defaults to 0. + #number: 1 + noarch: python + script: "{{ PYTHON }} -m pip install --no-deps --ignore-installed -vv . " # verbose + +requirements: + build: + - python>=3.8 + - pip + - numpy 1.16 + + host: + - python>=3.6 + - pip + - numpy 1.16 + + run: + - python>=3.6 + - {{ pin_compatible('numpy') }} + - boto3 + - deprecated + - fiona>=1.8.4 + - geopandas>=0.6.1 + - jsonpickle>=1.1 + - libspatialindex>=1.9.0 + - networkx>=2.2 + - numpy>=1.16.1 + - pytest>=3.9.0 + - python-jose>=3.0 + - pyproj>=1.9.6 + - pyyaml>=3.13 + - pycodestyle>=2.6.0 + - pyomo>=5.6 + - owslib>=0.17.1 + - pandas>=0.24.1 + - rasterio>=1.0.18 + - requests>=2.21.0 + - rtree>=0.8.3 + - scipy>=1.2.0 + - shapely>=1.6.4.post1 + - wntr>=0.1.6 + - ipopt>=3.11 + +test: + # Python imports + imports: + - pyincore + - pyomo + + requires: + # Put any additional test requirements here. For example + - pytest>=3.9 + commands: + # You can put built-in test commands to be run here. Use this to test that the entry points work. 
+ python -c "import pyincore; print('SUCCESS')" #; pyincore.test_client()" + # You can also put a file called run_test.py in the recipe that will be run at test time. + +about: + home: https://incore.ncsa.illinois.edu + license: MPL-2.0 + summary: 'Python library for IN-CORE (Interdependent Networked Community Resilience Modeling Environment)' + description: 'pyIncore is a component of IN-CORE. It is a python package consisting of two primary components: + 1) a set of service classes to interact with the IN-CORE web services, and 2) IN-CORE analyses. The pyIncore + package allows users to apply various hazards to infrastructure in selected areas, propagating the effect of + physical infrastructure damage and loss of functionality to social and economic impacts.' + dev_url: https://github.com/IN-CORE/pyincore + doc_url: https://incore.ncsa.illinois.edu/doc/incore + diff --git a/scripts/release-packages.yml b/scripts/release-packages.yml index ee4d75644..519ca72cc 100644 --- a/scripts/release-packages.yml +++ b/scripts/release-packages.yml @@ -1,7 +1,7 @@ # List of pyincore analyses to either include or exclude for release # If provided, 'excludes' takes preference over 'includeOnly' -version: '1.0.0' +version: '1.1.0' includeOnly: - buildingdamage diff --git a/setup.py b/setup.py index 6068db454..408405c0e 100644 --- a/setup.py +++ b/setup.py @@ -8,7 +8,7 @@ setup( name='pyincore', - version='1.0.0', + version='1.1.0', packages=find_packages(where=".", exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), include_package_data=True, package_data={ diff --git a/tests/data/fragility_curves/ConditionalStandardFragilityCurve_original.json b/tests/data/fragility_curves/ConditionalStandardFragilityCurve_original.json deleted file mode 100644 index 7f25b9cb5..000000000 --- a/tests/data/fragility_curves/ConditionalStandardFragilityCurve_original.json +++ /dev/null @@ -1,108 +0,0 @@ -{ - "className": "FragilitySet", - "demandTypes": [ - "Vmax" - ], - "demandUnits": [ - "ft/s" - ], - "fragilityCurves": [ - { - "className": "ConditionalStandardFragilityCurve", - "alpha": [ - 2.6, - 2.6 - ], - "beta": [ - 0.4, - 0.5 - ], - "alphaType": "median", - "curveType": "LogNormal", - "rules": { - "0": [ - "demand LT 3.2" - ], - "1": [ - "demand GE 3.2" - ] - }, - "description": "Slight" - }, - { - "className": "ConditionalStandardFragilityCurve", - "alpha": [ - 4.8, - 4.8 - ], - "beta": [ - 0.4, - 0.5 - ], - "alphaType": "median", - "curveType": "LogNormal", - "rules": { - "0": [ - "demand LT 3.2" - ], - "1": [ - "demand GE 3.2" - ] - }, - "description": "Moderate" - }, - { - "className": "ConditionalStandardFragilityCurve", - "alpha": [ - 8.4, - 8.4 - ], - "beta": [ - 0.4, - 0.5 - ], - "alphaType": "median", - "curveType": "LogNormal", - "rules": { - "0": [ - "demand LT 3.2" - ], - "1": [ - "demand GE 3.2" - ] - }, - "description": "Extensive" - }, - { - "className": "ConditionalStandardFragilityCurve", - "alpha": [ - 13.1, - 13.1 - ], - "beta": [ - 0.4, - 0.5 - ], - "alphaType": "median", - "curveType": "LogNormal", - "rules": { - "0": [ - "demand LT 3.2" - ], - "1": [ - "demand GE 3.2" - ] - }, - "description": "Complete" - } - ], - "description": "seaside epf substation conditional fragility", - "authors": [ - "Dylan Sanderson" - ], - "resultType": "Limit State", - "hazardType": "tsunami", - "inventoryType": "electric_facility", - "creator": "cwang138", - "id": "5ebb0db727e5d0c756a7575f" -} \ No newline at end of file diff --git a/tests/data/fragility_curves/ParametricFragilityCurve_original.json 
b/tests/data/fragility_curves/ParametricFragilityCurve_original.json deleted file mode 100644 index 943e3e027..000000000 --- a/tests/data/fragility_curves/ParametricFragilityCurve_original.json +++ /dev/null @@ -1,534 +0,0 @@ -{ - "className": "FragilitySet", - "demandTypes": [ - "PGA" - ], - "demandUnits": [ - "g" - ], - "fragilityCurves": [ - { - "className": "ParametricFragilityCurve", - "parameters": [ - { - "name": "Constant", - "unit": "", - "coefficient": -7.779087306, - "interceptTermDefault": 1.0 - }, - { - "name": "Demand", - "unit": "g", - "coefficient": 2.744724342, - "interceptTermDefault": 0.0 - }, - { - "name": "fc", - "unit": "MPa", - "coefficient": -0.02438759, - "interceptTermDefault": 33.78 - }, - { - "name": "fy", - "unit": "MPa", - "coefficient": -0.00180294, - "interceptTermDefault": 459.436160679934 - }, - { - "name": "EBL", - "unit": "", - "coefficient": 0.0, - "interceptTermDefault": -3.22 - }, - { - "name": "EBT", - "unit": "", - "coefficient": 0.242332657, - "interceptTermDefault": -2.3 - }, - { - "name": "FBL", - "unit": "", - "coefficient": 0.0, - "interceptTermDefault": -1.56 - }, - { - "name": "FBT", - "unit": "", - "coefficient": -0.242469282, - "interceptTermDefault": 0.99 - }, - { - "name": "pass_stf", - "unit": "N/mm/mm", - "coefficient": 0.0, - "interceptTermDefault": 21.545 - }, - { - "name": "act_stf", - "unit": "N/pile", - "coefficient": -4.8e-06, - "interceptTermDefault": 177795.0 - }, - { - "name": "rot_stf", - "unit": "N/mm/pile", - "coefficient": 0.0, - "interceptTermDefault": 175000.0 - }, - { - "name": "trns_stf", - "unit": "N/pile", - "coefficient": 0.0, - "interceptTermDefault": 222245.0 - }, - { - "name": "damp", - "unit": "", - "coefficient": -0.423772472, - "interceptTermDefault": 0.045 - }, - { - "name": "abt_gp_L1", - "unit": "mm", - "coefficient": 0.014277084, - "interceptTermDefault": 38.1 - }, - { - "name": "abt_gp_L2", - "unit": "mm", - "coefficient": -0.00512963, - "interceptTermDefault": 38.1 - }, - { - "name": "hng_gp_L1", - "unit": "mm", - "coefficient": -0.013161096, - "interceptTermDefault": 25.4 - }, - { - "name": "hng_gp_L2", - "unit": "mm", - "coefficient": 0.0, - "interceptTermDefault": 25.4 - }, - { - "name": "spn_ln", - "unit": "mm", - "coefficient": 0.000278567, - "interceptTermDefault": 13950.0 - }, - { - "name": "col_ht", - "unit": "mm", - "coefficient": 0.001902897, - "interceptTermDefault": 4070.0 - }, - { - "name": "width", - "unit": "ft", - "coefficient": 2.45e-06, - "interceptTermDefault": 6350.0 - } - ], - "curveType": "Logit", - "description": "Slight" - }, - { - "className": "ParametricFragilityCurve", - "parameters": [ - { - "name": "Constant", - "unit": "", - "coefficient": -7.755722087, - "interceptTermDefault": 1.0 - }, - { - "name": "Demand", - "unit": "g", - "coefficient": 2.401040162, - "interceptTermDefault": 0.0 - }, - { - "name": "fc", - "unit": "MPa", - "coefficient": -0.00515359, - "interceptTermDefault": 33.78 - }, - { - "name": "fy", - "unit": "MPa", - "coefficient": -0.000574063, - "interceptTermDefault": 459.436160679934 - }, - { - "name": "EBL", - "unit": "", - "coefficient": 0.000701378, - "interceptTermDefault": -3.22 - }, - { - "name": "EBT", - "unit": "", - "coefficient": 0.096521252, - "interceptTermDefault": -2.3 - }, - { - "name": "FBL", - "unit": "", - "coefficient": 0.0, - "interceptTermDefault": -1.56 - }, - { - "name": "FBT", - "unit": "", - "coefficient": -0.106471211, - "interceptTermDefault": 0.99 - }, - { - "name": "pass_stf", - "unit": "N/mm/mm", - "coefficient": 0.0, - 
"interceptTermDefault": 21.545 - }, - { - "name": "act_stf", - "unit": "N/pile", - "coefficient": -7.52e-06, - "interceptTermDefault": 177795.0 - }, - { - "name": "rot_stf", - "unit": "N/mm/pile", - "coefficient": 0.0, - "interceptTermDefault": 175000.0 - }, - { - "name": "trns_stf", - "unit": "N/pile", - "coefficient": 0.0, - "interceptTermDefault": 222245.0 - }, - { - "name": "damp", - "unit": "", - "coefficient": 0.0, - "interceptTermDefault": 0.045 - }, - { - "name": "abt_gp_L1", - "unit": "mm", - "coefficient": 0.011928621, - "interceptTermDefault": 38.1 - }, - { - "name": "abt_gp_L2", - "unit": "mm", - "coefficient": 0.0, - "interceptTermDefault": 38.1 - }, - { - "name": "hng_gp_L1", - "unit": "mm", - "coefficient": 0.0, - "interceptTermDefault": 25.4 - }, - { - "name": "hng_gp_L2", - "unit": "mm", - "coefficient": 0.0, - "interceptTermDefault": 25.4 - }, - { - "name": "spn_ln", - "unit": "mm", - "coefficient": 0.00023725, - "interceptTermDefault": 13950.0 - }, - { - "name": "col_ht", - "unit": "mm", - "coefficient": 0.001395892, - "interceptTermDefault": 4070.0 - }, - { - "name": "width", - "unit": "ft", - "coefficient": 1.48e-05, - "interceptTermDefault": 6350.0 - } - ], - "curveType": "Logit", - "description": "Moderate" - }, - { - "className": "ParametricFragilityCurve", - "parameters": [ - { - "name": "Constant", - "unit": "", - "coefficient": -10.04589897, - "interceptTermDefault": 1.0 - }, - { - "name": "Demand", - "unit": "g", - "coefficient": 2.739809257, - "interceptTermDefault": 0.0 - }, - { - "name": "fc", - "unit": "MPa", - "coefficient": 0.0, - "interceptTermDefault": 33.78 - }, - { - "name": "fy", - "unit": "MPa", - "coefficient": 0.0, - "interceptTermDefault": 459.436160679934 - }, - { - "name": "EBL", - "unit": "", - "coefficient": 0.030960656, - "interceptTermDefault": -3.22 - }, - { - "name": "EBT", - "unit": "", - "coefficient": 0.0, - "interceptTermDefault": -2.3 - }, - { - "name": "FBL", - "unit": "", - "coefficient": 0.0, - "interceptTermDefault": -1.56 - }, - { - "name": "FBT", - "unit": "", - "coefficient": -0.124315732, - "interceptTermDefault": 0.99 - }, - { - "name": "pass_stf", - "unit": "N/mm/mm", - "coefficient": 0.0, - "interceptTermDefault": 21.545 - }, - { - "name": "act_stf", - "unit": "N/pile", - "coefficient": -7.93e-06, - "interceptTermDefault": 177795.0 - }, - { - "name": "rot_stf", - "unit": "N/mm/pile", - "coefficient": 0.0, - "interceptTermDefault": 175000.0 - }, - { - "name": "trns_stf", - "unit": "N/pile", - "coefficient": 0.0, - "interceptTermDefault": 222245.0 - }, - { - "name": "damp", - "unit": "", - "coefficient": 0.0, - "interceptTermDefault": 0.045 - }, - { - "name": "abt_gp_L1", - "unit": "mm", - "coefficient": 0.008521309, - "interceptTermDefault": 38.1 - }, - { - "name": "abt_gp_L2", - "unit": "mm", - "coefficient": 0.0, - "interceptTermDefault": 38.1 - }, - { - "name": "hng_gp_L1", - "unit": "mm", - "coefficient": 0.0, - "interceptTermDefault": 25.4 - }, - { - "name": "hng_gp_L2", - "unit": "mm", - "coefficient": 0.0, - "interceptTermDefault": 25.4 - }, - { - "name": "spn_ln", - "unit": "mm", - "coefficient": 0.000273989, - "interceptTermDefault": 13950.0 - }, - { - "name": "col_ht", - "unit": "mm", - "coefficient": 0.001510439, - "interceptTermDefault": 4070.0 - }, - { - "name": "width", - "unit": "ft", - "coefficient": 4.27e-05, - "interceptTermDefault": 6350.0 - } - ], - "curveType": "Logit", - "description": "Extensive" - }, - { - "className": "ParametricFragilityCurve", - "parameters": [ - { - "name": "Constant", - "unit": 
"", - "coefficient": -10.04589897, - "interceptTermDefault": 1.0 - }, - { - "name": "Demand", - "unit": "g", - "coefficient": 2.739809257, - "interceptTermDefault": 0.0 - }, - { - "name": "fc", - "unit": "MPa", - "coefficient": 0.0, - "interceptTermDefault": 33.78 - }, - { - "name": "fy", - "unit": "MPa", - "coefficient": 0.0, - "interceptTermDefault": 459.436160679934 - }, - { - "name": "EBL", - "unit": "", - "coefficient": 0.030960656, - "interceptTermDefault": -3.22 - }, - { - "name": "EBT", - "unit": "", - "coefficient": 0.0, - "interceptTermDefault": -2.3 - }, - { - "name": "FBL", - "unit": "", - "coefficient": 0.0, - "interceptTermDefault": -1.56 - }, - { - "name": "FBT", - "unit": "", - "coefficient": -0.124315732, - "interceptTermDefault": 0.99 - }, - { - "name": "pass_stf", - "unit": "N/mm/mm", - "coefficient": 0.0, - "interceptTermDefault": 21.545 - }, - { - "name": "act_stf", - "unit": "N/pile", - "coefficient": -7.93e-06, - "interceptTermDefault": 177795.0 - }, - { - "name": "rot_stf", - "unit": "N/mm/pile", - "coefficient": 0.0, - "interceptTermDefault": 175000.0 - }, - { - "name": "trns_stf", - "unit": "N/pile", - "coefficient": 0.0, - "interceptTermDefault": 222245.0 - }, - { - "name": "damp", - "unit": "", - "coefficient": 0.0, - "interceptTermDefault": 0.045 - }, - { - "name": "abt_gp_L1", - "unit": "mm", - "coefficient": 0.008521309, - "interceptTermDefault": 38.1 - }, - { - "name": "abt_gp_L2", - "unit": "mm", - "coefficient": 0.0, - "interceptTermDefault": 38.1 - }, - { - "name": "hng_gp_L1", - "unit": "mm", - "coefficient": 0.0, - "interceptTermDefault": 25.4 - }, - { - "name": "hng_gp_L2", - "unit": "mm", - "coefficient": 0.0, - "interceptTermDefault": 25.4 - }, - { - "name": "spn_ln", - "unit": "mm", - "coefficient": 0.000273989, - "interceptTermDefault": 13950.0 - }, - { - "name": "col_ht", - "unit": "mm", - "coefficient": 0.001510439, - "interceptTermDefault": 4070.0 - }, - { - "name": "width", - "unit": "ft", - "coefficient": 4.27e-05, - "interceptTermDefault": 6350.0 - } - ], - "curveType": "Logit", - "description": "Complete" - } - ], - "description": "MSSS_Steel_Bridge-aba", - "paperReference": { - "name": "Seismic Fragility of Railway Bridge Classes:Methods, Models, and Comparison with the State of the Art", - "doi": "10.1061/(ASCE)BE.1943-5592.0001485.", - "yearPublished": "2019" - }, - "authors": [ - "Sushreyo Misra", - "Jamie Ellen Padgett" - ], - "resultType": "Limit State", - "hazardType": "earthquake", - "inventoryType": "bridge", - "creator": "cwang138", - "id": "5ed6bfc35b6166000155d0d9" -} \ No newline at end of file diff --git a/tests/data/fragility_curves/PeriodBuildingFragilityCurve_original.json b/tests/data/fragility_curves/PeriodBuildingFragilityCurve_original.json deleted file mode 100644 index ac9e00074..000000000 --- a/tests/data/fragility_curves/PeriodBuildingFragilityCurve_original.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "className": "FragilitySet", - "legacyId": "SF_C1_10", - "hazardType": "earthquake", - "inventoryType": "building", - "authors": [ - "J.M.Bracci", - "A.M.Reinhorn", - "J.B.Mander" - ], - "paperReference": { - "name": "NCEER-92-0027", - "yearPublished": "1992" - }, - "description": "Gravity Load Designed Concrete Frames", - "resultType": "Limit State", - "demandTypes": [ - "Sa" - ], - "demandUnits": [ - "g" - ], - "fragilityCurves": [ - { - "className": "PeriodBuildingFragilityCurve", - "periodEqnType": 3, - "periodParam1": 0.097, - "periodParam2": 0.624, - "periodParam0": 13.0, - "fsParam0": -1.4297, - "fsParam1": -0.5882, 
- "fsParam2": 0.4863, - "fsParam3": 0.0237, - "fsParam4": 33.6713, - "fsParam5": 11.7658, - "description": "Immediate Occupancy" - }, - { - "className": "PeriodBuildingFragilityCurve", - "periodEqnType": 3, - "periodParam1": 0.097, - "periodParam2": 0.624, - "periodParam0": 13.0, - "fsParam0": -1.0435, - "fsParam1": -0.5021, - "fsParam2": 0.3111, - "fsParam3": 0.0851, - "fsParam4": 12.533, - "fsParam5": 3.892, - "description": "Life Safety" - }, - { - "className": "PeriodBuildingFragilityCurve", - "periodEqnType": 3, - "periodParam1": 0.097, - "periodParam2": 0.624, - "periodParam0": 13.0, - "fsParam0": -0.8408, - "fsParam1": -0.35, - "fsParam2": 0.3651, - "fsParam3": 0.0631, - "fsParam4": 6.4062, - "fsParam5": 2.6323, - "description": "Collapse Prevention" - } - ], - "creator": "incore", - "id": "5b47b2d8337d4a36187c6c05" -} \ No newline at end of file diff --git a/tests/data/fragility_curves/PeriodStandardFragilityCurve_original.json b/tests/data/fragility_curves/PeriodStandardFragilityCurve_original.json deleted file mode 100644 index 5ae8633e9..000000000 --- a/tests/data/fragility_curves/PeriodStandardFragilityCurve_original.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "className": "FragilitySet", - "legacyId": "SF_S1_212", - "hazardType": "earthquake", - "inventoryType": "building", - "authors": [ - "Elnashai and Jeong" - ], - "description": "Mid-Rise Steel Moment Frame", - "resultType": "Limit State", - "demandTypes": [ - "0.2 sec Sa" - ], - "demandUnits": [ - "g" - ], - "fragilityCurves": [ - { - "className": "PeriodStandardFragilityCurve", - "periodParam2": 0.0, - "periodParam1": 0.0, - "periodParam0": 1.08, - "periodEqnType": 1, - "beta": 0.836, - "curveType": "LogNormal", - "description": "Moderate", - "alpha": -0.576, - "alphaType": "lambda" - }, - { - "className": "PeriodStandardFragilityCurve", - "periodParam2": 0.0, - "periodParam1": 0.0, - "periodParam0": 1.08, - "periodEqnType": 1, - "beta": 0.911, - "curveType": "LogNormal", - "description": "Extensive", - "alpha": 0.23, - "alphaType": "lambda" - }, - { - "className": "PeriodStandardFragilityCurve", - "periodParam2": 0.0, - "periodParam1": 0.0, - "periodParam0": 1.08, - "periodEqnType": 1, - "beta": 1.05, - "curveType": "LogNormal", - "description": "Complete", - "alpha": 1.197, - "alphaType": "lambda" - } - ], - "creator": "incore", - "id": "5b47b2d7337d4a36187c61c9" -} \ No newline at end of file diff --git a/tests/data/fragility_curves/StandardFragilityCurve_original.json b/tests/data/fragility_curves/StandardFragilityCurve_original.json deleted file mode 100644 index 81369ebc1..000000000 --- a/tests/data/fragility_curves/StandardFragilityCurve_original.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "className": "FragilitySet", - "demandTypes": [ - "momentumFlux" - ], - "demandUnits": [ - "ft^3/s^2" - ], - "fragilityCurves": [ - { - "className": "StandardFragilityCurve", - "alpha": 1170.0, - "beta": 0.79, - "alphaType": "median", - "curveType": "LogNormal", - "description": "Moderate" - }, - { - "className": "StandardFragilityCurve", - "alpha": 3259.0, - "beta": 0.79, - "alphaType": "median", - "curveType": "LogNormal", - "description": "Extensive" - }, - { - "className": "StandardFragilityCurve", - "alpha": 5347.0, - "beta": 0.79, - "alphaType": "median", - "curveType": "LogNormal", - "description": "Complete" - } - ], - "description": "Concrete Moment frame 4 to 7", - "authors": [ - "FEMA" - ], - "resultType": "Limit State", - "hazardType": "tsunami", - "inventoryType": "building", - "creator": "incrtest", - "id": 
"5fb5984e96513b1f4bd7a0a3" -} \ No newline at end of file diff --git a/tests/pyincore/analyses/joplinempiricalrestoration/test_joplinempiricalrestoration.py b/tests/pyincore/analyses/joplinempiricalrestoration/test_joplinempiricalrestoration.py new file mode 100644 index 000000000..c85d8db50 --- /dev/null +++ b/tests/pyincore/analyses/joplinempiricalrestoration/test_joplinempiricalrestoration.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 + +from pyincore import IncoreClient, FragilityService, MappingSet +from pyincore.analyses.buildingdamage import BuildingDamage +from pyincore.analyses.joplinempiricalrestoration import JoplinEmpiricalRestoration +import pyincore.globals as pyglobals + + +def run_with_base_class(): + client = IncoreClient(pyglobals.INCORE_API_DEV_URL) + + # Joplin tornado building damage + bldg_dataset_id = "5df7d0de425e0b00092d0082" # joplin building v6 + + bldg_dmg = BuildingDamage(client) + bldg_dmg.load_remote_input_dataset("buildings", bldg_dataset_id) + + mapping_id = "5e8e3a21eaa8b80001f04f1c" # 19 archetype, non-retrofit + fragility_service = FragilityService(client) + mapping_set = MappingSet(fragility_service.get_mapping(mapping_id)) + bldg_dmg.set_input_dataset("dfr3_mapping_set", mapping_set) + bldg_dmg.set_parameter("fragility_key", "Non-Retrofit Fragility ID Code") + + # The simulated EF-5 tornado shows geographical locations and the range of wind speed + # of tornado hazard in Joplin. + hazard_type = "tornado" + hazard_id = "5dfa32bbc0601200080893fb" + + result_name = "joplin_tornado_dmg_result" + bldg_dmg.set_parameter("result_name", result_name) + bldg_dmg.set_parameter("hazard_type", hazard_type) + bldg_dmg.set_parameter("hazard_id", hazard_id) + bldg_dmg.set_parameter("num_cpu", 4) + bldg_dmg.set_parameter("seed", 1000) + bldg_dmg.run_analysis() + # end of Building damage analysis + + # get csv results from Building damage analysis + building_dmg_result = bldg_dmg.get_output_dataset("ds_result") + building_dmg_result.get_dataframe_from_csv() + + restoration = JoplinEmpiricalRestoration(client) + + restoration.load_remote_input_dataset("buildings", bldg_dataset_id) + # restoration.load_remote_input_dataset("building_dmg", building_dmg_result) + restoration.set_input_dataset("building_dmg", building_dmg_result) + + result_name = "Joplin_empirical_restoration_result" + restoration.set_parameter("result_name", result_name) + restoration.set_parameter("target_functionality_level", 0) + # restoration.set_parameter("seed", 1234) + + # Run Analysis + restoration.run_analysis() + + +if __name__ == '__main__': + run_with_base_class() diff --git a/tests/pyincore/analyses/populationdislocation/test_populationdislocation.py b/tests/pyincore/analyses/populationdislocation/test_populationdislocation.py index 5737c0493..205f0a931 100644 --- a/tests/pyincore/analyses/populationdislocation/test_populationdislocation.py +++ b/tests/pyincore/analyses/populationdislocation/test_populationdislocation.py @@ -14,8 +14,7 @@ def run_with_base_class(): # incore-dev building_dmg = "602d96e4b1db9c28aeeebdce" # dev Joplin # building_dmg = "602d975db1db9c28aeeebe35" # 15 guids test - dev Joplin - housing_unit_alloc = "5df7c989425e0b00092c5eb4" # dev Joplin - # housing_unit_alloc = "602ea965b1db9c28aeefa5d6" # 23 address ids test - dev Joplin + housing_unit_alloc = "61563545483ecb19e4304c2a" # dev Joplin bg_data = "5df7cb0b425e0b00092c9464" # Joplin 2ev2 value_loss = "602d508fb1db9c28aeedb2a5" diff --git a/tests/pyincore/models/test_dataset.py b/tests/pyincore/models/test_dataset.py new 
diff --git a/tests/pyincore/models/test_dataset.py b/tests/pyincore/models/test_dataset.py
new file mode 100644
index 000000000..4ae2fd8fc
--- /dev/null
+++ b/tests/pyincore/models/test_dataset.py
@@ -0,0 +1,21 @@
+import pandas as pd
+
+from pyincore.dataset import Dataset
+
+
+def test_from_dataframe():
+    df = pd.DataFrame()
+    dataset = Dataset.from_csv_data(df, "empty.csv", "ergo:buildingDamageVer6")
+    assert dataset.data_type == "ergo:buildingDamageVer6"
+
+
+def test_from_csv_data():
+    result_data = []
+    dataset = Dataset.from_csv_data(result_data, "empty.csv", "ergo:buildingDamageVer6")
+    assert dataset.data_type == "ergo:buildingDamageVer6"
+
+
+def test_from_json_data():
+    result_data = {}
+    dataset = Dataset.from_json_data(result_data, "empty.json", "incore:buildingDamageSupplement")
+    assert dataset.data_type == "incore:buildingDamageSupplement"
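Editor's note: a small usage sketch for the Dataset factory methods tested above, this time with a non-empty table; the column names are illustrative only.

import pandas as pd

from pyincore.dataset import Dataset

# Illustrative columns; any result table works.
df = pd.DataFrame({"guid": ["a-1", "b-2"], "LS_0": [0.42, 0.07]})
damage_ds = Dataset.from_csv_data(df, "building_dmg.csv", "ergo:buildingDamageVer6")
# The data type now travels with the dataset, so it can be chained into
# another analysis via set_input_dataset() instead of being re-uploaded.
assert damage_ds.data_type == "ergo:buildingDamageVer6"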
(13.0), 0.624)" -# (get_remote_fragility_set("5b47b2d8337d4a36187c6c05"), {"num_stories": 2}, 0.7408241022436427), -# (get_remote_fragility_set("5b47b2d8337d4a36187c6c05"), {}, 0.4806980784822461), -# ]) -# def test_get_building_period(fragility_set, args, expected): -# fragility_curve = fragility_set.fragility_curves[0] -# assert fragility_curve.get_building_period(fragility_set.fragility_curve_parameters, **args) == expected diff --git a/tests/pyincore/test_hazardservice.py b/tests/pyincore/test_hazardservice.py index a7e72f77a..9b0ce0c98 100644 --- a/tests/pyincore/test_hazardservice.py +++ b/tests/pyincore/test_hazardservice.py @@ -4,9 +4,9 @@ # terms of the Mozilla Public License v2.0 which accompanies this distribution, # and is available at https://www.mozilla.org/en-US/MPL/2.0/ import os - import numpy as np import pytest +import requests.exceptions from pyincore import globals as pyglobals @@ -70,6 +70,42 @@ def test_post_earthquake_hazard_values(hazardsvc): 2.5884360071121133, 34.445240752324956] +def test_bad_units_post_earthquake_hazard_values(hazardsvc): + payload = [ + { + "demands": ["1.0 SD", "0.2 SA"], + "units": ["cm", "zzz"], + "loc": "35.84,-89.90" + } + ] + response = hazardsvc.post_earthquake_hazard_values( + "5b902cb273c3371e1236b36b", + payload + ) + + assert len(response) == len(payload) and response[0]['hazardValues'][1] == -9999.3 + + +def test_bad_format_post_earthquake_hazard_values(hazardsvc): + payload = [ + { + "demands": ["1.0 SD", "0.2 SA"], + "units": ["cm", "g"], + "loc": "35.84-89.90" + } + ] + + try: + hazardsvc.post_earthquake_hazard_values( + "5b902cb273c3371e1236b36b", + payload + ) + except requests.exceptions.HTTPError as e: + assert e.response.status_code == 400 + else: + assert False + + def test_get_liquefaction_values(hazardsvc): liq_vals = hazardsvc.get_liquefaction_values("5b902cb273c3371e1236b36b", "5a284f53c7d30d13bc08249c", diff --git a/tests/pyincore/analyses/joplincge/test_joplincge_json.py b/tests/pyincore/utils/test_cgecsvoutputjson.py similarity index 62% rename from tests/pyincore/analyses/joplincge/test_joplincge_json.py rename to tests/pyincore/utils/test_cgecsvoutputjson.py index ffe5c4013..637f4af5a 100644 --- a/tests/pyincore/analyses/joplincge/test_joplincge_json.py +++ b/tests/pyincore/utils/test_cgecsvoutputjson.py @@ -1,14 +1,19 @@ +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + from pyincore import IncoreClient from pyincore.analyses.joplincge import JoplinCGEModel from pyincore.utils.cgeoutputprocess import CGEOutputProcess import pyincore.globals as pyglobals +import os # This script runs JoplinCGEModel analysis with input files from # IN-CORE development services. The output csv files are converted to json # format suitable for the IN-CORE Playbook tool. -def run_base_analysis(): +def run_convert_cge_json_chained(): client = IncoreClient(pyglobals.INCORE_API_DEV_URL) joplin_cge = JoplinCGEModel(client) @@ -59,16 +64,37 @@ def run_base_analysis(): cge_json.get_cge_gross_income(gross_income_result, None, "cge_total_household_income.json") cge_json.get_cge_employment(pre_demand_result, post_demand_result, None, None, "cge_employment.json") - # Alternatively, you can run the output conversion with files on the file system. 
- # my_path = "PATH_TO_PYINCORE/pyincore/tests/pyincore/analyses/joplincge/" - # cge_json.get_cge_household_count(None, my_path + "household-count.csv", "cge_total_household_count.json") - # cge_json.get_cge_gross_income(None, my_path + "gross-income.csv", "cge_total_household_income.json") - # cge_json.get_cge_employment(None, None, - # my_path + "pre-disaster-factor-demand.csv", - # my_path + "post-disaster-factor-demand.csv", - # "cge_employment.json") - # cge_json.get_cge_domestic_supply(None, my_path + "domestic-supply.csv", "cge_domestic_supply.json") + return True + + +def run_convert_cge_json_path(testpath): + # test the external file with a path + + cge_json = CGEOutputProcess() + cge_json.get_cge_household_count(None, + os.path.join(testpath, "household-count.csv"), + "cge_total_household_count.json") + cge_json.get_cge_gross_income(None, + os.path.join(testpath, "gross-income.csv"), + "cge_total_household_income.json") + cge_json.get_cge_employment(None, None, + os.path.join(testpath, "pre-disaster-factor-demand.csv"), + os.path.join(testpath, "post-disaster-factor-demand.csv"), + "cge_employment.json") + cge_json.get_cge_domestic_supply(None, + os.path.join(testpath, "domestic-supply.csv"), + "cge_domestic_supply.json") + return True if __name__ == '__main__': - run_base_analysis() + # test chaining with Joplin CGE analysis + run_convert_cge_json_chained() + + # test the external file with a path + testpath = "" + # testpath = "/Users///pyincore/tests/pyincore/utils" + if testpath: + run_convert_cge_json_path(testpath) + + print("DONE") diff --git a/tests/pyincore/utils/test_popdisloutputprocess.py b/tests/pyincore/utils/test_popdisloutputprocess.py new file mode 100644 index 000000000..16784c30b --- /dev/null +++ b/tests/pyincore/utils/test_popdisloutputprocess.py @@ -0,0 +1,98 @@ +# This program and the accompanying materials are made available under the +# terms of the Mozilla Public License v2.0 which accompanies this distribution, +# and is available at https://www.mozilla.org/en-US/MPL/2.0/ + +from pyincore import PopDislOutputProcess +from pyincore import IncoreClient, DataService, SpaceService +from pyincore.analyses.populationdislocation import PopulationDislocation +import pyincore.globals as pyglobals +import os + + +def upload_shapefile_to_services(client): + # Upload to incore services and put under commresilience space + # It assumes the shapefile is in the utils directory + datasvc = DataService(client) + dataset_prop = { + "title": "Joplin Population Dislocation For Heatmap Plotting", + "description": "Contains only dislocated numprec for Joplin playbook plotting usage", + "contributors": [], + "dataType": "incore:popdislocationShp", + "storedUrl": "", + "format": "shapefile" + } + response = datasvc.create_dataset(dataset_prop) + dataset_id = response['id'] + files = ['joplin-pop-disl-numprec.shp', + 'joplin-pop-disl-numprec.dbf', + 'joplin-pop-disl-numprec.shx', + 'joplin-pop-disl-numprec.prj'] + datasvc.add_files_to_dataset(dataset_id, files) + + # add to space + spacesvc = SpaceService(client) + spacesvc.add_dataset_to_space("5f99ba8b0ace240b22a82e00", dataset_id=dataset_id) # commresilience + print(dataset_id + " successfully uploaded and move to commresilience space!") + + +def run_convert_pd_json_chained(client): + # Joplin population dislocation + # incore-dev + building_dmg = "602d96e4b1db9c28aeeebdce" # dev Joplin + # building_dmg = "602d975db1db9c28aeeebe35" # 15 guids test - dev Joplin + housing_unit_alloc = "61563545483ecb19e4304c2a" # dev Joplin 
diff --git a/tests/pyincore/utils/test_popdisloutputprocess.py b/tests/pyincore/utils/test_popdisloutputprocess.py
new file mode 100644
index 000000000..16784c30b
--- /dev/null
+++ b/tests/pyincore/utils/test_popdisloutputprocess.py
@@ -0,0 +1,98 @@
+# This program and the accompanying materials are made available under the
+# terms of the Mozilla Public License v2.0 which accompanies this distribution,
+# and is available at https://www.mozilla.org/en-US/MPL/2.0/
+
+from pyincore import PopDislOutputProcess
+from pyincore import IncoreClient, DataService, SpaceService
+from pyincore.analyses.populationdislocation import PopulationDislocation
+import pyincore.globals as pyglobals
+import os
+
+
+def upload_shapefile_to_services(client):
+    # Upload to IN-CORE services and put under the commresilience space.
+    # It assumes the shapefile is in the utils directory.
+    datasvc = DataService(client)
+    dataset_prop = {
+        "title": "Joplin Population Dislocation For Heatmap Plotting",
+        "description": "Contains only dislocated numprec for Joplin playbook plotting usage",
+        "contributors": [],
+        "dataType": "incore:popdislocationShp",
+        "storedUrl": "",
+        "format": "shapefile"
+    }
+    response = datasvc.create_dataset(dataset_prop)
+    dataset_id = response['id']
+    files = ['joplin-pop-disl-numprec.shp',
+             'joplin-pop-disl-numprec.dbf',
+             'joplin-pop-disl-numprec.shx',
+             'joplin-pop-disl-numprec.prj']
+    datasvc.add_files_to_dataset(dataset_id, files)
+
+    # add to space
+    spacesvc = SpaceService(client)
+    spacesvc.add_dataset_to_space("5f99ba8b0ace240b22a82e00", dataset_id=dataset_id)  # commresilience
+    print(dataset_id + " successfully uploaded and moved to the commresilience space!")
+
+
+def run_convert_pd_json_chained(client):
+    # Joplin population dislocation
+    # incore-dev
+    building_dmg = "602d96e4b1db9c28aeeebdce"  # dev Joplin
+    # building_dmg = "602d975db1db9c28aeeebe35"  # 15 guids test - dev Joplin
+    housing_unit_alloc = "61563545483ecb19e4304c2a"  # dev Joplin 2ev3
+    bg_data = "5df7cb0b425e0b00092c9464"  # Joplin 2ev2
+    value_loss = "602d508fb1db9c28aeedb2a5"
+
+    result_name = "joplin-pop-disl-results"
+    seed = 1111
+
+    pop_dis = PopulationDislocation(client)
+
+    pop_dis.load_remote_input_dataset("building_dmg", building_dmg)
+    pop_dis.load_remote_input_dataset("housing_unit_allocation", housing_unit_alloc)
+    pop_dis.load_remote_input_dataset("block_group_data", bg_data)
+    pop_dis.load_remote_input_dataset("value_poss_param", value_loss)
+
+    pop_dis.set_parameter("result_name", result_name)
+    pop_dis.set_parameter("seed", seed)
+
+    pop_dis.run_analysis()
+    pd_result = pop_dis.get_output_dataset("result")
+
+    return pd_result
+
+
+if __name__ == '__main__':
+    # test chaining with population dislocation
+    client = IncoreClient(pyglobals.INCORE_API_DEV_URL)
+
+    pd_result = run_convert_pd_json_chained(client)
+
+    pd_process = PopDislOutputProcess(pd_result)
+
+    pd_process.pd_by_race("PD_by_race.json")
+    pd_process.pd_by_income("PD_by_income.json")
+    pd_process.pd_by_tenure("PD_by_tenure.json")
+    pd_process.pd_by_housing("PD_by_housing.json")
+    pd_process.pd_total("PD_by_total.json")
+
+    filename = pd_process.get_heatmap_shp("joplin-pop-disl-numprec.shp")
+    print("Test chaining", filename)
+    # upload_shapefile_to_services(client)
+
+    # test the external file with a path
+    testpath = ""
+    # testpath = "/Users///pyincore/tests/pyincore/utils"
+    if testpath:
+        pd_process = PopDislOutputProcess(None, os.path.join(testpath, "joplin-pop-disl-results.csv"))
+
+        pd_process.pd_by_race("PD_by_race.json")
+        pd_process.pd_by_income("PD_by_income.json")
+        pd_process.pd_by_tenure("PD_by_tenure.json")
+        pd_process.pd_by_housing("PD_by_housing.json")
+        pd_process.pd_total("PD_by_total.json")
+
+        filename = pd_process.get_heatmap_shp("joplin-pop-disl-numprec.shp")
+        print("Test path", filename)
+    print("DONE")
diff --git a/tests/test_format.py b/tests/test_format.py
index a9dd3585e..a41de7708 100644
--- a/tests/test_format.py
+++ b/tests/test_format.py
@@ -49,7 +49,15 @@
     os.path.join(PYINCORE_ROOT_FOLDER, 'pyincore/analyses/populationdislocation/'),
     os.path.join(PYINCORE_ROOT_FOLDER,
                  'tests/pyincore/analyses/cumulativebuildingdamage/test_cumulativebuildingdamage.py'),
-    os.path.join(PYINCORE_ROOT_FOLDER, 'tests/pyincore/analyses/populationdislocation/test_populationdislocation.py')
+    os.path.join(PYINCORE_ROOT_FOLDER, 'tests/pyincore/analyses/populationdislocation/test_populationdislocation.py'),
+    os.path.join(PYINCORE_ROOT_FOLDER, 'tests/pyincore/models/test_dataset.py'),
+    os.path.join(PYINCORE_ROOT_FOLDER, 'pyincore/utils/cgeoutputprocess.py'),
+    os.path.join(PYINCORE_ROOT_FOLDER, 'tests/pyincore/utils/test_cgecsvoutputjson.py'),
+    os.path.join(PYINCORE_ROOT_FOLDER, 'pyincore/analyses/joplinempiricalrestoration/joplinempiricalrestoration.py'),
+    os.path.join(PYINCORE_ROOT_FOLDER, 'pyincore/analyses/joplinempiricalrestoration/joplinempirrestor_util.py'),
+    os.path.join(PYINCORE_ROOT_FOLDER,
+                 'tests/pyincore/analyses/joplinempiricalrestoration/test_joplinempiricalrestoration.py'),
+    os.path.join(PYINCORE_ROOT_FOLDER, 'tests/pyincore/utils/test_popdisloutputprocess.py')
 ]
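Editor's note: test_format.py itself is not shown in this diff. For orientation only, a PEP 8 check over one of the newly listed paths could be sketched with the pycodestyle API like this; the StyleGuide options and the single path are assumptions, not the project's actual configuration.

import pycodestyle

# A sketch only; max_line_length=120 is an assumed setting, and the path
# must be resolved against the repository root in practice.
style = pycodestyle.StyleGuide(max_line_length=120)
report = style.check_files(['pyincore/utils/cgeoutputprocess.py'])
assert report.total_errors == 0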