From ee01ca958296f5df74aa1b9f01323a347924b3be Mon Sep 17 00:00:00 2001
From: trevorb1
Date: Tue, 16 Jul 2024 08:17:31 -0700
Subject: [PATCH] bump flake8 to 7.1.0

---
 .pre-commit-config.yaml       |  4 ++--
 docs/contributing.rst         |  2 +-
 src/otoole/results/results.py | 12 ++++++------
 tests/test_read_strategies.py |  5 ++---
 4 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index edb7dc51..34a415bf 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -47,13 +47,13 @@ repos:
 #         additional_dependencies: [black]
 -   repo: https://github.com/PyCQA/flake8
-    rev: 4.0.1
+    rev: 7.1.0
     hooks:
     -   id: flake8
     ## You can add flake8 plugins via `additional_dependencies`:
     #  additional_dependencies: [flake8-bugbear]
 
 -   repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.9.0 # Use the sha / tag you want to point at
+    rev: v1.10.1 # Use the sha / tag you want to point at
     hooks:
     -   id: mypy
         additional_dependencies: ['types-PyYAML']

diff --git a/docs/contributing.rst b/docs/contributing.rst
index 3bdd7dc2..e582053e 100644
--- a/docs/contributing.rst
+++ b/docs/contributing.rst
@@ -1 +1 @@
-.. include:: ../CONTRIBUTING.rst
\ No newline at end of file
+.. include:: ../CONTRIBUTING.rst

diff --git a/src/otoole/results/results.py b/src/otoole/results/results.py
index ae45d737..1954801c 100644
--- a/src/otoole/results/results.py
+++ b/src/otoole/results/results.py
@@ -1,7 +1,7 @@
 import logging
 from abc import abstractmethod
 from io import StringIO
-from typing import Any, Dict, List, Set, TextIO, Tuple, Union
+from typing import Any, Dict, TextIO, Tuple, Union
 
 import pandas as pd
 
@@ -145,7 +145,7 @@ def _convert_wide_to_long(self, data: pd.DataFrame) -> Dict[str, pd.DataFrame]:
         return results
 
 
-def check_duplicate_index(df: pd.DataFrame, columns: List, index: List) -> pd.DataFrame:
+def check_duplicate_index(df: pd.DataFrame, columns: list, index: list) -> pd.DataFrame:
     """Catches pandas error when there are duplicate column indices"""
     if check_for_duplicates(index):
         index = rename_duplicate_column(index)
@@ -156,12 +156,12 @@ def check_duplicate_index(df: pd.DataFrame, columns: List, index: List) -> pd.Da
     return df, index
 
 
-def check_for_duplicates(index: List) -> bool:
+def check_for_duplicates(index: list) -> bool:
     return len(set(index)) != len(index)
 
 
-def identify_duplicate(index: List) -> Union[int, bool]:
-    elements = set()  # type: Set
+def identify_duplicate(index: list) -> Union[int, bool]:
+    elements = set()  # type: set
     for counter, elem in enumerate(index):
         if elem in elements:
             return counter
@@ -170,7 +170,7 @@ def identify_duplicate(index: List) -> Union[int, bool]:
     return False
 
 
-def rename_duplicate_column(index: List) -> List:
+def rename_duplicate_column(index: list) -> list:
     column = index.copy()
     location = identify_duplicate(column)
     if location:

diff --git a/tests/test_read_strategies.py b/tests/test_read_strategies.py
index 574fcee8..6b11e62d 100644
--- a/tests/test_read_strategies.py
+++ b/tests/test_read_strategies.py
@@ -1,7 +1,6 @@
 import os
 from io import StringIO
 from textwrap import dedent
-from typing import List
 
 import pandas as pd
 from amply import Amply
@@ -384,7 +383,7 @@ def test_read_cbc_to_dataframe(self, cbc_input, expected, user_config):
                 ).set_index(["REGION", "EMISSION", "YEAR"])
             },
         ),
-    ]  # type: List
+    ]  # type: list
 
     @mark.parametrize(
         "results,expected",
@@ -398,7 +397,7 @@ def test_convert_cbc_to_csv_long(self, results, expected, user_config):
         for name, df in actual.items():
             pd.testing.assert_frame_equal(df, expected[name])
 
-    test_data_3 = [(total_cost_cbc, {}, total_cost_otoole_df)]  # type: List
+    test_data_3 = [(total_cost_cbc, {}, total_cost_otoole_df)]  # type: list
 
     @mark.parametrize(
         "cbc_solution,input_data,expected",