bump flake8 to 7.1.0
trevorb1 committed Jul 16, 2024
1 parent 56643d1 commit ee01ca9
Showing 4 changed files with 11 additions and 12 deletions.
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -47,13 +47,13 @@ repos:
     # additional_dependencies: [black]

 - repo: https://github.com/PyCQA/flake8
-  rev: 4.0.1
+  rev: 7.1.0
   hooks:
   - id: flake8
     ## You can add flake8 plugins via `additional_dependencies`:
     # additional_dependencies: [flake8-bugbear]
 - repo: https://github.com/pre-commit/mirrors-mypy
-  rev: v1.9.0 # Use the sha / tag you want to point at
+  rev: v1.10.1 # Use the sha / tag you want to point at
   hooks:
   - id: mypy
     additional_dependencies: ['types-PyYAML']
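The hunk above pins the flake8 hook at 7.1.0 (previously 4.0.1) and the mypy mirror at v1.10.1 (previously v1.9.0); nothing else in the hook configuration changes. As a rough sketch, not part of the commit, the pinned flake8 rev can be read back out of this file and compared with whatever is installed locally; the "repos"/"repo"/"rev" layout is taken from the excerpt above, and PyYAML is assumed to be importable in the current environment.

from importlib.metadata import version

import yaml  # assumption: PyYAML is available locally

with open(".pre-commit-config.yaml") as handle:
    config = yaml.safe_load(handle)

# Locate the flake8 hook entry and report its pinned revision.
pinned = next(
    repo["rev"] for repo in config["repos"] if repo["repo"].endswith("/flake8")
)
print(f"pinned: {pinned}, installed: {version('flake8')}")

With this commit applied, the printed pin is 7.1.0.
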
2 changes: 1 addition & 1 deletion docs/contributing.rst
@@ -1 +1 @@
-.. include:: ../CONTRIBUTING.rst
+.. include:: ../CONTRIBUTING.rst
12 changes: 6 additions & 6 deletions src/otoole/results/results.py
@@ -1,7 +1,7 @@
 import logging
 from abc import abstractmethod
 from io import StringIO
-from typing import Any, Dict, List, Set, TextIO, Tuple, Union
+from typing import Any, Dict, TextIO, Tuple, Union

 import pandas as pd

@@ -145,7 +145,7 @@ def _convert_wide_to_long(self, data: pd.DataFrame) -> Dict[str, pd.DataFrame]:
         return results


-def check_duplicate_index(df: pd.DataFrame, columns: List, index: List) -> pd.DataFrame:
+def check_duplicate_index(df: pd.DataFrame, columns: list, index: list) -> pd.DataFrame:
     """Catches pandas error when there are duplicate column indices"""
     if check_for_duplicates(index):
         index = rename_duplicate_column(index)
@@ -156,12 +156,12 @@ def check_duplicate_index(df: pd.DataFrame, columns: List, index: List) -> pd.DataFrame:
     return df, index


-def check_for_duplicates(index: List) -> bool:
+def check_for_duplicates(index: list) -> bool:
     return len(set(index)) != len(index)


-def identify_duplicate(index: List) -> Union[int, bool]:
-    elements = set() # type: Set
+def identify_duplicate(index: list) -> Union[int, bool]:
+    elements = set() # type: set
     for counter, elem in enumerate(index):
         if elem in elements:
             return counter
@@ -170,7 +170,7 @@ def identify_duplicate(index: List) -> Union[int, bool]:
     return False


-def rename_duplicate_column(index: List) -> List:
+def rename_duplicate_column(index: list) -> list:
     column = index.copy()
     location = identify_duplicate(column)
     if location:
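The annotation changes above swap typing.List and typing.Set for the builtin list and set generics, so the now-unused names drop out of the typing import; runtime behaviour is unchanged. A minimal, self-contained sketch reassembled from the hunks above shows two of the helpers at work; the collapsed lines of identify_duplicate are filled in here as an assumption, not copied from the diff.

from typing import Union


def check_for_duplicates(index: list) -> bool:
    return len(set(index)) != len(index)


def identify_duplicate(index: list) -> Union[int, bool]:
    elements = set()  # type: set
    for counter, elem in enumerate(index):
        if elem in elements:
            return counter
        else:
            # Assumption: the collapsed lines record each element so a later
            # repeat can be detected; they are not shown verbatim in the diff.
            elements.add(elem)
    return False


if __name__ == "__main__":
    duplicated = ["REGION", "TECHNOLOGY", "REGION", "YEAR"]
    assert check_for_duplicates(duplicated)
    assert identify_duplicate(duplicated) == 2  # position of the repeated name
    assert identify_duplicate(["REGION", "YEAR"]) is False
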
5 changes: 2 additions & 3 deletions tests/test_read_strategies.py
@@ -1,7 +1,6 @@
 import os
 from io import StringIO
 from textwrap import dedent
-from typing import List

 import pandas as pd
 from amply import Amply
@@ -384,7 +383,7 @@ def test_read_cbc_to_dataframe(self, cbc_input, expected, user_config):
                 ).set_index(["REGION", "EMISSION", "YEAR"])
             },
         ),
-    ] # type: List
+    ] # type: list

     @mark.parametrize(
         "results,expected",
@@ -398,7 +397,7 @@ def test_convert_cbc_to_csv_long(self, results, expected, user_config):
         for name, df in actual.items():
             pd.testing.assert_frame_equal(df, expected[name])

-    test_data_3 = [(total_cost_cbc, {}, total_cost_otoole_df)] # type: List
+    test_data_3 = [(total_cost_cbc, {}, total_cost_otoole_df)] # type: list

     @mark.parametrize(
         "cbc_solution,input_data,expected",
