Skip to content

Commit

Permalink
Merge pull request #188 from NREL/release-1.3.0
Browse files Browse the repository at this point in the history
Release 1.3.0
  • Loading branch information
rjhanes authored Sep 30, 2022
2 parents 41c02bb + 8dd54cf commit 9ff0b04
Show file tree
Hide file tree
Showing 26 changed files with 1,240 additions and 1,235 deletions.
9 changes: 5 additions & 4 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -61,8 +61,9 @@ instance/
# Scrapy stuff:
.scrapy

# Sphinx documentation
# Sphinx documentation (stored in doc-pages branch)
docs/_build/
docs/docs/

# PyBuilder
target/
Expand Down Expand Up @@ -129,13 +130,13 @@ ENV/
.Rproj.user
.Rhistory
*.Rproj
*.html


# mypy
.mypy_cache/

# Sphinx documentation build
doc/_build


celavi/Eberle - NREL LDRD CELAVI - Overview_jan2021.pptx
routing/spatialresolutionTexas/Spatial Analysis CELAVI.docx
routing/spatialresolutionTexas/TexasMap.pdf
Expand Down
28 changes: 12 additions & 16 deletions celavi/compute_locations.py
Original file line number Diff line number Diff line change
Expand Up @@ -135,13 +135,13 @@ def wind_power_plant(self):
(turbine_locations['p_year'] != '-1')]

# reformat data for later use
turbine_locations_with_eia.rename(columns={"t_state": "region_id_2",
turbine_locations_with_eia = turbine_locations_with_eia.rename(columns={"t_state": "region_id_2",
"t_county": "region_id_3",
"xlong": "long",
"ylat": "lat",
"eia_id": "facility_id",
"p_year": "year"},
inplace=True)
)

# exclude Hawaii, Guam, Puerto Rico, and Alaska (only have road network data for the contiguous United States)
turbine_locations_with_eia = turbine_locations_with_eia[turbine_locations_with_eia.region_id_2 != 'GU']
Expand Down Expand Up @@ -178,7 +178,7 @@ def wind_power_plant(self):

# determine average lat and long for all turbines by facility_id
# (this is the plant location for each facility_id)
plant_locations = turbine_locations_filtered.groupby(['facility_id', 'region_id_2', "region_id_3"])['long', 'lat'].agg('mean').reset_index()
plant_locations = turbine_locations_filtered.groupby(['facility_id', 'region_id_2', "region_id_3"])[['long', 'lat']].agg('mean').reset_index()
# Drop duplicates on p_year and eia_id, keeping only the first county occurrence when a facility spreads over multiple counties (simplifying assumption).
plant_locations = plant_locations.drop_duplicates(subset=['facility_id'], keep='first')
plant_locations = plant_locations.astype({'facility_id': 'int'}) # recast type for facility_id
Expand Down Expand Up @@ -351,7 +351,7 @@ def capacity_projections(self):

# where total capacity decreases in a year, set the new capacity value
# to 0
stscen['cap_new'][stscen['cap_new'] < 0] = 0
stscen.loc[stscen['cap_new'] < 0,'cap_new'] = 0

# .diff() leaves empty values where there is no previous row.
# replace these NAs with 0
Expand Down Expand Up @@ -445,12 +445,8 @@ def capacity_projections(self):
on='p_name',
how='outer'
)

self.capacity_data.append(
capacity_future,
ignore_index=True,
sort=True
).rename(
self.capacity_data = pd.concat([self.capacity_data,capacity_future])
self.capacity_data = self.capacity_data.sort_values(by = list(self.capacity_data.columns)).rename(
columns={'n_turbine': 'n_technology'}
).to_csv(
self.technology_data_filename,
Expand Down Expand Up @@ -484,10 +480,9 @@ def capacity_projections(self):
# Add the future power plants to the locations dataset stored in self
# It has to go back into self to get saved at the end of the
# join_facilities method
self.locs = self.locs.append(
_new_facility_locs,
ignore_index=True,
sort=True)
self.locs = pd.concat([self.locs,_new_facility_locs])
self.locs = self.locs.sort_values(by = list(self.locs.columns))


def join_facilities(self, locations_output_file):
"""
Expand All @@ -506,8 +501,9 @@ def join_facilities(self, locations_output_file):
landfill_locations_no_nulls = ComputeLocations.landfill(self)
facility_locations = ComputeLocations.other_facility(self)

locations = facility_locations.append(wind_plant_locations)
locations = locations.append(landfill_locations_no_nulls)

locations = pd.concat([facility_locations,wind_plant_locations])
locations = pd.concat([locations,landfill_locations_no_nulls])
locations.reset_index(drop=True, inplace=True)

# exclude Hawaii, Guam, Puerto Rico, and Alaska
Expand Down
16 changes: 11 additions & 5 deletions celavi/costgraph.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ def __init__(
sc_in_circ : List[str]=[],
sc_out_circ : List[str]=[],
year: float = 2000.0,
start_year: float = 2000.0,
verbose: int = 0,
save_copy=False,
save_name="netw.csv",
Expand Down Expand Up @@ -78,6 +79,8 @@ def __init__(
Facility type(s) that process material for re-circulation outside the supply chain
year : float
Simulation year provided by the DES at CostGraph instantiation.
start_year : float
Year at beginning of the model run.
verbose : int
Integer specifying how much info CostGraph should provide as it
works.
Expand All @@ -95,7 +98,7 @@ def __init__(
random_state : np.random.default_rng
Instantiated random number generator for uncertainty analysis.
"""
self.cost_methods = CostMethods(seed=random_state, run=run)
self.cost_methods = CostMethods(start_year = start_year, seed=random_state, run=run)

self.start_time = time()
self.step_costs = pd.read_csv(step_costs_file)
Expand Down Expand Up @@ -716,10 +719,13 @@ def build_supplychain_graph(self):

# Year and component mass are defined when CostGraph is instantiated
# and do not need to be updated during supply chain generation

self.supply_chain.edges[edge]["cost"] = sum(
[f(_edge_dict) for f in self.supply_chain.edges[edge]["cost_method"]]
)
try:
self.supply_chain.edges[edge]["cost"] = sum(
[f(_edge_dict) for f in self.supply_chain.edges[edge]["cost_method"]]
)
except TypeError:
print(f'CostGraph: A cost method assigned to {edge} is returning None', flush=True)
raise TypeError

if self.verbose > 0:
print(
Expand Down
Loading

0 comments on commit 9ff0b04

Please sign in to comment.