Skip to content

Commit

Permalink
Merge pull request #32 from e2nIEE/develop
Browse files Browse the repository at this point in the history
new release 1.4.0
  • Loading branch information
SteffenMeinecke authored May 12, 2023
2 parents eca679b + a4808ce commit 4dc4352
Show file tree
Hide file tree
Showing 27 changed files with 362 additions and 222 deletions.
6 changes: 2 additions & 4 deletions .github/workflows/github_test_action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.6', '3.7', '3.8', '3.9']
python-version: ['3.8', '3.9', '3.10', '3.11']

steps:
- uses: actions/checkout@v2
Expand All @@ -32,9 +32,7 @@ jobs:
run: |
python -m pip install --upgrade pip
python -m pip install pytest
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
if ${{ matrix.python-version == '3.7' }}; then python -m pip install pypower; fi
if ${{ matrix.python-version == '3.6' || matrix.python-version == '3.7' || matrix.python-version == '3.8' || matrix.python-version == '3.9' }}; then python -m pip install matplotlib; fi
pip install -r requirements.txt
pip install .["all"]
- name: List of installed packages
run: |
Expand Down
112 changes: 112 additions & 0 deletions .github/workflows/release.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
# This workflow will create a Python package and upload it to testPyPi or PyPi
# Then, it installs simbench from there and all dependencies and runs tests with different Python versions

name: release

# Controls when the action will run.
on:
  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:
    inputs:
      upload_server:
        description: 'upload server'
        required: true
        default: 'testpypi'
        type: choice
        options:
          - 'testpypi'
          - 'pypi'

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  upload:
    # The type of runner that the job will run on
    runs-on: ubuntu-latest

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@v3

      # Sets up python3
      - uses: actions/setup-python@v4
        with:
          # quoted — an unquoted 3.10 would be parsed as the float 3.1
          python-version: '3.10'
      # Installs and upgrades pip, installs other dependencies and installs the package from setup.py
      - name: Install dependencies
        run: |
          # Upgrade pip
          python3 -m pip install --upgrade pip
          # Install twine
          python3 -m pip install setuptools wheel twine
      # Upload to TestPyPI
      - name: Build and Upload to TestPyPI
        if: inputs.upload_server == 'testpypi'
        run: |
          python3 setup.py sdist --formats=zip
          twine check dist/* --strict
          python3 -m twine upload dist/*
        env:
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.TESTPYPI }}
          TWINE_REPOSITORY: testpypi

      # Upload to PyPI
      - name: Build and Upload to PyPI
        if: inputs.upload_server == 'pypi'
        run: |
          python3 setup.py sdist --formats=zip
          twine check dist/* --strict
          python3 -m twine upload dist/*
        env:
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.PYPI }}
          TWINE_REPOSITORY: pypi

      # give the package index time to publish the new version before the
      # build job tries to pip-install it
      - name: Sleep for 60s to make release available
        uses: juliangruber/sleep-action@v1
        with:
          time: 60s

  # install the just-released package from the chosen index and run the test
  # suite on every supported Python version / OS combination
  build:

    runs-on: ${{ matrix.os }}
    needs: upload
    strategy:
      matrix:
        python-version: ['3.8', '3.9', '3.10', '3.11']
        os: [ubuntu-latest, windows-latest]
        group: [1, 2]
    steps:
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install pytest python-igraph pytest-split
      # numba is skipped on 3.11 (no wheel available at release time);
      # the conditional needs PowerShell syntax on Windows, bash on Ubuntu
      - name: Install dependencies (Windows)
        if: matrix.os == 'windows-latest'
        run: |
          if ( '${{ matrix.python-version }}' -ne '3.11' ) { python -m pip install numba }
      - name: Install dependencies (Ubuntu)
        if: matrix.os == 'ubuntu-latest'
        run: |
          if ${{ matrix.python-version != '3.11' }}; then python -m pip install numba; fi
      - name: Install simbench from TestPyPI
        if: inputs.upload_server == 'testpypi'
        run: |
          pip install --no-cache-dir -i https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple simbench
      - name: Install simbench from PyPI
        if: inputs.upload_server == 'pypi'
        run: |
          pip install simbench
      - name: List all installed packages
        run: |
          pip list
      # tests run from the installed package (--pyargs), split in two groups
      # per matrix entry to shorten wall-clock time
      - name: Test with pytest
        run: |
          pytest --splits 2 --group ${{ matrix.group }} --pyargs simbench.test
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -21,3 +21,4 @@ tutorials/.ipynb_checkpoints
*.bak
*.orig
*.ini
.vscode/*
5 changes: 2 additions & 3 deletions .readthedocs.yml
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
version: 2

python:
version: 3.5
version: 3.8
install:
- requirements: doc/requirements.txt
- method: pip
path: .
extra_requirements:
- docs
- docs,plotting
6 changes: 6 additions & 0 deletions CHANGELOG.rst
Original file line number Diff line number Diff line change
@@ -1,6 +1,12 @@
Change Log
=============

[1.4.0] - 2023-05-12
----------------------
- [CHANGED] pandas 2.0 support
- [CHANGED] pandapower 2.12.1 support
- [ADDED] GitHub Actions Workflow (:code:`release.yml`) to automate the SimBench release process

[1.3.0] - 2021-11-25
----------------------

Expand Down
4 changes: 2 additions & 2 deletions README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,8 @@
:target: https://github.com/e2nIEE/simbench/actions/
:alt: GitHub Actions

.. image:: https://codecov.io/gh/e2nIEE/simbench/branch/master/graph/badge.svg
:target: https://codecov.io/github/e2nIEE/simbench?branch=master
.. image:: https://codecov.io/github/e2nIEE/simbench/coverage.svg?branch=master
:target: https://app.codecov.io/github/e2nIEE/simbench?branch=master
:alt: codecov

.. image:: https://pepy.tech/badge/simbench
Expand Down
4 changes: 2 additions & 2 deletions doc/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,9 +54,9 @@
# built documents.
#
# The short X.Y version.
version = "1.3.0"
version = "1.4.0"
# The full version, including alpha/beta/rc tags.
release = "1.3.0"
release = "1.4.0"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
Expand Down
1 change: 0 additions & 1 deletion doc/requirements.txt

This file was deleted.

2 changes: 2 additions & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# pip only:
pandapower>=2.12.1
22 changes: 13 additions & 9 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,15 +5,15 @@
from setuptools import setup, find_packages
import re

with open('README.rst', 'rb') as f:
install = f.read().decode('utf-8')

with open('CHANGELOG.rst', 'rb') as f:
changelog = f.read().decode('utf-8')

with open('README.rst', 'rb') as f:
readme = f.read().decode('utf-8')

with open('requirements.txt') as req_file:
requirements = req_file.read()

classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
Expand All @@ -24,28 +24,32 @@
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3']

with open('.github/workflows/github_test_action.yml', 'rb') as f:
lines = f.read().decode('utf-8')
versions = set(re.findall('3.[0-9]', lines))
for version in versions:
classifiers.append('Programming Language :: Python :: 3.%s' % version[-1])
classifiers.append(f'Programming Language :: Python :: 3.{version[-1]}')

long_description = '\n\n'.join((install, changelog))
long_description = '\n\n'.join((readme, changelog))

setup(
name='simbench',
version='1.3.0',
version='1.4.0',
author='Steffen Meinecke',
author_email='[email protected]',
description='Electrical Power System Benchmark Models',
long_description=readme,
long_description_content_type="text/x-rst",
url='http://www.simbench.de/en',
license='odbl',
install_requires=["pandapower>=2.5"],
python_requires='>=3.8',
install_requires=requirements,
extras_require={"docs": ["numpydoc", "sphinx", "sphinx_rtd_theme"],
"all": ["numpydoc", "sphinx", "sphinx_rtd_theme"]},
"plotting": ["matplotlib"],
"tutorials": ["matplotlib"],
"all": ["numpydoc", "sphinx", "sphinx_rtd_theme", "matplotlib"]},
packages=find_packages(),
include_package_data=True,
classifiers=classifiers
classifiers=classifiers,
)
2 changes: 1 addition & 1 deletion simbench/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
# contributors (see AUTHORS file for details). All rights reserved.

__version__ = "1.3.0"
__version__ = "1.4.0"
__author__ = "smeinecke"

import os
Expand Down
6 changes: 3 additions & 3 deletions simbench/converter/auxiliary.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
from pandapower import compare_arrays

try:
import pplog as logging
import pandaplan.core.pplog as logging
except ImportError:
import logging

Expand Down Expand Up @@ -253,8 +253,8 @@ def append_str_by_underline_count(str_series, append_only_duplicates=False, coun
"""
# --- initializations
# ensure only unique values in reserved_strings:
reserved_strings = pd.Series(sorted(set(reserved_strings))) if reserved_strings is not None \
else pd.Series(dtype=object)
reserved_strings = pd.Series(sorted(set(reserved_strings)), dtype=object) if reserved_strings \
is not None else pd.Series(dtype=object)
count = counting_start

# --- do first append
Expand Down
2 changes: 1 addition & 1 deletion simbench/converter/csv_data_manipulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from copy import deepcopy

try:
import pplog as logging
import pandaplan.core.pplog as logging
except ImportError:
import logging

Expand Down
5 changes: 3 additions & 2 deletions simbench/converter/csv_pp_converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
from pandapower.plotting import create_generic_coordinates

try:
import pplog as logging
import pandaplan.core.pplog as logging
except ImportError:
import logging

Expand Down Expand Up @@ -574,7 +574,8 @@ def _pp_types_to_csv1(net, export_pp_std_types):
pp_typenames = set(dummy_net.std_types[elm].keys())
unused_pp_typenames = pp_typenames - set(net[elm].std_type.unique())
typenames2convert = set(net.std_types[elm].keys()) - unused_pp_typenames
net.std_types[elm] = pd.DataFrame(net.std_types[elm]).T.loc[typenames2convert].reset_index()
net.std_types[elm] = pd.DataFrame(net.std_types[elm]).T.loc[list(
typenames2convert)].reset_index()
net.std_types[elm].rename(columns={"index": "std_type"}, inplace=True)

convert_line_type_acronym(net)
Expand Down
2 changes: 1 addition & 1 deletion simbench/converter/format_information.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from pandas import DataFrame

try:
import pplog as logging
import pandaplan.core.pplog as logging
except ImportError:
import logging

Expand Down
22 changes: 11 additions & 11 deletions simbench/converter/pp_net_manipulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from copy import deepcopy

try:
import pplog as logging
import pandaplan.core.pplog as logging
except ImportError:
import logging

Expand Down Expand Up @@ -111,21 +111,21 @@ def convert_parallel_branches(net, multiple_entries=True, elm_to_convert=["line"
elm_to_append["parallel"] = 1
num_par = list(net[element].parallel.loc[parallels])
elm_to_append["name"] += [("_" + str(num)) for num in num_par]
net[element] = net[element].append(
pd.DataFrame(elm_to_append.values, columns=net[element].columns),
net[element] = pd.concat([net[element],
pd.DataFrame(elm_to_append.values, columns=net[element].columns)],
ignore_index=True)
net["res_"+element] = net["res_"+element].append(
pd.DataFrame(res_elm_to_append.values, columns=net["res_"+element].columns),
net["res_"+element] = pd.concat([net["res_"+element],
pd.DataFrame(res_elm_to_append.values, columns=net["res_"+element].columns)],
ignore_index=True)

# add parallel switches
for i, par in pd.Series(parallels).iteritems():
for i, par in enumerate(parallels):
sw_to_append = net.switch.loc[(net.switch.element == par) & (
net.switch.et == element[0])] # does not work for trafo3w
sw_to_append["element"] = n_elm + i
sw_to_append["name"] += "_" + str(num_par[i])
net["switch"] = net["switch"].append(
pd.DataFrame(sw_to_append.values, columns=net["switch"].columns),
net["switch"] = pd.concat([net["switch"],
pd.DataFrame(sw_to_append.values, columns=net["switch"].columns)],
ignore_index=True)
# update parallels
parallels = net[element].index[net[element].parallel > 1]
Expand Down Expand Up @@ -193,8 +193,8 @@ def merge_busbar_coordinates(net):
continue
connected_nodes = pp.get_connected_buses(net, bb_node, consider=("t", "s"))
if len(connected_nodes):
net.bus_geodata.x.loc[connected_nodes] = net.bus_geodata.x.at[bb_node]
net.bus_geodata.y.loc[connected_nodes] = net.bus_geodata.y.at[bb_node]
net.bus_geodata.x.loc[list(connected_nodes)] = net.bus_geodata.x.at[bb_node]
net.bus_geodata.y.loc[list(connected_nodes)] = net.bus_geodata.y.at[bb_node]
all_connected_buses |= connected_nodes


Expand Down Expand Up @@ -559,7 +559,7 @@ def move_slack_gens_to_ext_grid(net):

def ensure_bus_index_columns_as_int(net):
""" Ensures that all columns with bus indices, e.g. net.line.from_bus, have int as dtype. """
ebts = pp.element_bus_tuples(bus_elements=True, branch_elements=True, res_elements=False)
ebts = set(pp.element_bus_tuples(bus_elements=True, branch_elements=True, res_elements=False))
ebts |= {("switch", "element"), ("measurement", "element")}
for elm, bus in ebts:
net[elm][bus] = net[elm][bus].astype(int)
2 changes: 1 addition & 1 deletion simbench/converter/read_and_write.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
import os

try:
import pplog as logging
import pandaplan.core.pplog as logging
except ImportError:
import logging

Expand Down
Loading

0 comments on commit 4dc4352

Please sign in to comment.