Fix type errors in LanguageProcessors
Swatinem committed Sep 11, 2024
1 parent ab427f8 commit 5830e7b
Showing 11 changed files with 217 additions and 224 deletions.
19 changes: 17 additions & 2 deletions services/report/languages/helpers.py
@@ -1,4 +1,19 @@
def remove_non_ascii(string, replace_with=""):
from xml.etree.ElementTree import Element


def remove_non_ascii(string: str) -> str:
# ASCII control characters <=31, 127
# Extended ASCII characters: >=128
return "".join([i if 31 < ord(i) < 127 else replace_with for i in string])
return "".join([c if 31 < ord(c) < 127 else "" for c in string])


def child_text(parent: Element, element: str) -> str:
"""
Returns the text content of the first child of `parent` with tag `element`.
Defaults to the empty string if no such child is found or the child has no text.
"""
child = parent.find(element)
if child is None:
return ""
return child.text or ""

Codecov warning (codecov/patch) on services/report/languages/helpers.py#L19: added line was not covered by tests.
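
For context, a minimal usage sketch of the two helpers introduced above. The XML snippet and sample strings are made up for illustration, and the import assumes the repository root is on the Python path.

```python
from xml.etree.ElementTree import fromstring

from services.report.languages.helpers import child_text, remove_non_ascii

# A toy <statement> element with one text-bearing child and one empty child.
statement = fromstring("<statement><source>foo.scala</source><count/></statement>")

print(child_text(statement, "source"))   # "foo.scala" -- text of the first matching child
print(child_text(statement, "count"))    # ""          -- child exists but has no text
print(child_text(statement, "missing"))  # ""          -- no such child at all

# Control characters (ord <= 31) and extended ASCII (ord >= 128) are dropped.
print(remove_non_ascii("caf\xe9\tok"))   # "cafok"
```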
14 changes: 7 additions & 7 deletions services/report/languages/lua.py
@@ -20,18 +20,18 @@ def process(
docs = re.compile(r"^=+\n", re.M).split


def from_txt(string: bytes, report_builder_session: ReportBuilderSession) -> None:
def from_txt(input: bytes, report_builder_session: ReportBuilderSession) -> None:
_file = None
for string in docs(string.decode(errors="replace").replace("\t", " ")):
string = string.rstrip()
if string == "Summary":
for line in docs(input.decode(errors="replace").replace("\t", " ")):
line = line.rstrip()
if line == "Summary":
_file = None

elif string.endswith((".lua", ".lisp")):
_file = report_builder_session.create_coverage_file(string)
elif line.endswith((".lua", ".lisp")):
_file = report_builder_session.create_coverage_file(line)

elif _file is not None:
for ln, source in enumerate(string.splitlines(), start=1):
for ln, source in enumerate(line.splitlines(), start=1):
try:
cov = source.strip().split(" ")[0]
cov = 0 if cov[-2:] in ("*0", "0") else int(cov)
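
For reference, a small sketch of what the `docs` splitter above does to a report in the text format this processor parses; the report content here is invented purely to show the split behaviour.

```python
import re

# Same splitter as in lua.py: split the report on separator lines made of '='.
docs = re.compile(r"^=+\n", re.M).split

report = "====\nfoo.lua\n  1 print('hi')\n====\nSummary\n"
print(docs(report))
# ['', "foo.lua\n  1 print('hi')\n", 'Summary\n']
```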
7 changes: 4 additions & 3 deletions services/report/languages/mono.py
@@ -27,17 +27,18 @@ def from_xml(xml: Element, report_builder_session: ReportBuilderSession) -> None
if filename not in files:
_file = report_builder_session.create_coverage_file(filename)
files[filename] = _file

_file = files[filename]
if _file is None:
continue

# loop through statements
for line in method.iter("statement"):
line = line.attrib
coverage = int(line["counter"])
attr = line.attrib
coverage = int(attr["counter"])

Codecov warning (codecov/patch) on services/report/languages/mono.py#L37-L38: added lines were not covered by tests.

_file.append(
int(line["line"]),
int(attr["line"]),
report_builder_session.create_coverage_line(
coverage,
),
132 changes: 65 additions & 67 deletions services/report/languages/pycoverage.py
@@ -1,5 +1,3 @@
from typing import Dict, List, Optional, Union

import sentry_sdk

from services.report.languages.base import BaseLanguageProcessor
@@ -11,14 +9,69 @@

class PyCoverageProcessor(BaseLanguageProcessor):
def matches_content(self, content: dict, first_line: str, name: str) -> bool:
return (
"meta" in content
and "files" in content
and isinstance(content.get("meta"), dict)
and "show_contexts" in content.get("meta")
)
meta = content.get("meta")
return "files" in content and isinstance(meta, dict) and "show_contexts" in meta

@sentry_sdk.trace
def process(
self, content: dict, report_builder_session: ReportBuilderSession
) -> None:
labels_table = LabelsTable(report_builder_session, content)

for filename, file_coverage in content["files"].items():
_file = report_builder_session.create_coverage_file(filename)
if _file is None:
continue

Codecov warning (codecov/patch) on services/report/languages/pycoverage.py#L24: added line was not covered by tests.

lines_and_coverage = [
(COVERAGE_HIT, ln) for ln in file_coverage["executed_lines"]
] + [(COVERAGE_MISS, ln) for ln in file_coverage["missing_lines"]]
for cov, ln in lines_and_coverage:
if ln > 0:
label_list_of_lists: list[list[str]] | list[list[int]] = []
if report_builder_session.should_use_label_index:
label_list_of_lists = [
[single_id]
for single_id in labels_table._get_list_of_label_ids(
report_builder_session.label_index,
file_coverage.get("contexts", {}).get(str(ln), []),
)
]
else:
label_list_of_lists = [
[labels_table._normalize_label(testname)]
for testname in file_coverage.get("contexts", {}).get(
str(ln), []
)
]
_file.append(
ln,
report_builder_session.create_coverage_line(
cov,
labels_list_of_lists=label_list_of_lists,
),
)
report_builder_session.append(_file)


class LabelsTable:
def __init__(
self, report_builder_session: ReportBuilderSession, content: dict
) -> None:
self.labels_table: dict[str, str] = {}
self.reverse_table: dict[str, int] = {}
self.are_labels_already_encoded = False

# Compressed pycoverage files will include a labels_table
if "labels_table" in content:
self.labels_table = content["labels_table"]
# We can pre-populate some of the indexes that will be used
for idx, testname in self.labels_table.items():
clean_label = self._normalize_label(testname)
report_builder_session.label_index[int(idx)] = clean_label
self.are_labels_already_encoded = True

def _normalize_label(self, testname) -> str:
def _normalize_label(self, testname: int | float | str) -> str:
if isinstance(testname, int) or isinstance(testname, float):
# This is from a compressed report.
# Pull label from the labels_table
Expand All @@ -30,9 +83,9 @@ def _normalize_label(self, testname) -> str:

def _get_list_of_label_ids(
self,
current_label_idx: Optional[Dict[int, str]],
line_contexts: List[Union[str, int]] = None,
) -> List[int]:
current_label_idx: dict[int, str],
line_contexts: list[str | int],
) -> list[int]:
if self.are_labels_already_encoded:
# The line contexts already include indexes in the table.
# We can re-use the table and don't have to do anything with contexts.
@@ -51,58 +104,3 @@ def _get_list_of_label_ids(
label_ids_for_line.add(label_id)

return sorted(label_ids_for_line)

@sentry_sdk.trace
def process(
self, content: dict, report_builder_session: ReportBuilderSession
) -> None:
# Compressed pycoverage files will include a labels_table
# Mapping label_idx: int --> label: str
self.labels_table: dict[int, str] = None
self.reverse_table = {}
self.are_labels_already_encoded = False
if "labels_table" in content:
self.labels_table = content["labels_table"]
# We can pre-populate some of the indexes that will be used
for idx, testname in self.labels_table.items():
clean_label = self._normalize_label(testname)
report_builder_session.label_index[int(idx)] = clean_label
self.are_labels_already_encoded = True

for filename, file_coverage in content["files"].items():
_file = report_builder_session.create_coverage_file(filename)
if _file is None:
continue

lines_and_coverage = [
(COVERAGE_HIT, ln) for ln in file_coverage["executed_lines"]
] + [(COVERAGE_MISS, ln) for ln in file_coverage["missing_lines"]]
for cov, ln in lines_and_coverage:
if report_builder_session.should_use_label_index:
label_list_of_lists = [
[single_id]
for single_id in self._get_list_of_label_ids(
report_builder_session.label_index,
file_coverage.get("contexts", {}).get(str(ln), []),
)
]
else:
label_list_of_lists = [
[self._normalize_label(testname)]
for testname in file_coverage.get("contexts", {}).get(
str(ln), []
)
]
if ln > 0:
_file.append(
ln,
report_builder_session.create_coverage_line(
cov,
labels_list_of_lists=label_list_of_lists,
),
)
report_builder_session.append(_file)

# We don't need these anymore, so let them be removed by the garbage collector
self.reverse_table = None
self.labels_table = None
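
As a rough illustration of the per-file loop above, this is how a coverage.py JSON `files` entry is flattened into (status, line) pairs before lines are appended; the numeric values of COVERAGE_HIT and COVERAGE_MISS are assumed here rather than taken from the repository.

```python
# Stand-ins for the real constants imported in pycoverage.py (assumed values).
COVERAGE_HIT, COVERAGE_MISS = 1, 0

# Shape of a single entry of content["files"] in a coverage.py JSON report.
file_coverage = {
    "executed_lines": [1, 3],
    "missing_lines": [2],
    "contexts": {"1": ["test_a"]},
}

lines_and_coverage = [
    (COVERAGE_HIT, ln) for ln in file_coverage["executed_lines"]
] + [(COVERAGE_MISS, ln) for ln in file_coverage["missing_lines"]]

print(lines_and_coverage)  # [(1, 1), (1, 3), (0, 2)]
```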
62 changes: 18 additions & 44 deletions services/report/languages/scoverage.py
@@ -4,9 +4,11 @@
from shared.helpers.numeric import maxint
from shared.reports.resources import ReportFile

from services.report.languages.base import BaseLanguageProcessor
from services.report.report_builder import CoverageType, ReportBuilderSession

from .base import BaseLanguageProcessor
from .helpers import child_text


class SCoverageProcessor(BaseLanguageProcessor):
def matches_content(self, content: Element, first_line: str, name: str) -> bool:
Expand All @@ -20,53 +22,24 @@ def process(


def from_xml(xml: Element, report_builder_session: ReportBuilderSession) -> None:
path_fixer = report_builder_session.path_fixer

ignore = []
cache_fixes = {}
_cur_file_name = None
files: dict[str, ReportFile] = {}
files: dict[str, ReportFile | None] = {}
for statement in xml.iter("statement"):
# Determine the path
unfixed_path = next(statement.iter("source")).text
if unfixed_path in ignore:
continue

elif unfixed_path in cache_fixes:
# cached results
filename = cache_fixes[unfixed_path]

else:
# fix path
filename = path_fixer(unfixed_path)
if filename is None:
# add unfixed to list of ignored
ignore.append(unfixed_path)
continue

# cache result (unfixed => filenmae)
cache_fixes[unfixed_path] = filename
filename = child_text(statement, "source")
if filename not in files:
files[filename] = report_builder_session.create_coverage_file(filename)

# Get the file
if filename != _cur_file_name:
_cur_file_name = filename
if filename not in files:
_file = report_builder_session.create_coverage_file(
filename, do_fix_path=False
)
files[filename] = _file
_file = files[filename]
_file = files.get(filename)
if _file is None:
continue

# Add the line
ln = int(next(statement.iter("line")).text)
hits = next(statement.iter("count")).text
try:
if next(statement.iter("ignored")).text == "true":
continue
except StopIteration:
pass
ln = int(child_text(statement, "line"))
hits = child_text(statement, "count")

Codecov warning (codecov/patch) on services/report/languages/scoverage.py#L36-L37: added lines were not covered by tests.

if child_text(statement, "ignored") == "true":
continue

Codecov warning (codecov/patch) on services/report/languages/scoverage.py#L39-L40: added lines were not covered by tests.

if next(statement.iter("branch")).text == "true":
if child_text(statement, "branch") == "true":

Codecov warning (codecov/patch) on services/report/languages/scoverage.py#L42: added line was not covered by tests.
cov = "%s/2" % hits
_file.append(
ln,
Expand All @@ -85,4 +58,5 @@ def from_xml(xml: Element, report_builder_session: ReportBuilderSession) -> None
)

for _file in files.values():
report_builder_session.append(_file)
if _file is not None:
report_builder_session.append(_file)

Codecov warning (codecov/patch) on services/report/languages/scoverage.py#L62: added line was not covered by tests.
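
The rewritten loop above caches the result of `create_coverage_file` per path and uses a `None` entry to remember paths that should be skipped, which is also why the final append loop filters out `None`. A standalone sketch of that pattern, with the builder call stubbed out and all names invented for illustration:

```python
def create_coverage_file(name: str) -> str | None:
    # Stand-in for report_builder_session.create_coverage_file: returns None
    # for paths that should be ignored.
    return None if name.endswith("Ignored.scala") else f"<ReportFile {name}>"

files: dict[str, str | None] = {}
for filename in ["Foo.scala", "Ignored.scala", "Foo.scala"]:
    if filename not in files:
        files[filename] = create_coverage_file(filename)  # called once per path
    _file = files.get(filename)
    if _file is None:
        continue  # previously rejected path, skipped cheaply
    print("would append a coverage line to", _file)

for _file in files.values():
    if _file is not None:
        print("would append", _file, "to the report")
```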
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from services.report.languages.pycoverage import PyCoverageProcessor
from services.report.languages.pycoverage import LabelsTable, PyCoverageProcessor
from services.report.report_builder import SpecialLabelsEnum
from test_utils.base import BaseTestCase

@@ -189,9 +189,7 @@ def test_matches_content_pycoverage(self):
assert not p.matches_content({"meta": {}}, "", "coverage.json")

def test__get_list_of_label_ids(self):
p = PyCoverageProcessor()
p.are_labels_already_encoded = False
p.reverse_table = {}
p = LabelsTable(None, {})
current_label_idx = {}
assert p._get_list_of_label_ids(current_label_idx, [""]) == [1]
assert current_label_idx == {
@@ -213,7 +211,7 @@ }
}

def test__get_list_of_label_ids_already_encoded(self):
p = PyCoverageProcessor()
p = LabelsTable(None, {})
p.are_labels_already_encoded = True
assert p._get_list_of_label_ids({}, ["2"]) == [2]
assert p._get_list_of_label_ids({}, ["2", "3", "1"]) == [1, 2, 3]
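
The test above constructs `LabelsTable` directly; here is a minimal sketch of the same call, with the resulting label index left unprinted in detail because its exact contents come from `SpecialLabelsEnum` and are truncated in the diff.

```python
from services.report.languages.pycoverage import LabelsTable

# No report builder session and no compressed labels_table in the content.
table = LabelsTable(None, {})

label_index: dict[int, str] = {}
ids = table._get_list_of_label_ids(label_index, [""])

print(ids)          # [1], per test__get_list_of_label_ids above
print(label_index)  # now holds the id -> label mapping assigned for ""
```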
10 changes: 5 additions & 5 deletions services/report/languages/vb.py
@@ -31,14 +31,14 @@ def from_xml(xml: Element, report_builder_session: ReportBuilderSession) -> None

# loop through each line
for line in module.iter("range"):
line = line.attrib
_file = files.get(line["source_id"])
attr = line.attrib
_file = files.get(attr["source_id"])
if _file is None:
continue

coverage = line["covered"]
coverage = 1 if coverage == "yes" else 0 if coverage == "no" else True
for ln in range(int(line["start_line"]), int(line["end_line"]) + 1):
cov_txt = attr["covered"]
coverage = 1 if cov_txt == "yes" else 0 if cov_txt == "no" else True
for ln in range(int(attr["start_line"]), int(attr["end_line"]) + 1):
_file.append(
ln,
report_builder_session.create_coverage_line(
17 changes: 10 additions & 7 deletions services/report/languages/vb2.py
@@ -3,9 +3,11 @@
import sentry_sdk
from shared.reports.resources import ReportFile

from services.report.languages.base import BaseLanguageProcessor
from services.report.report_builder import ReportBuilderSession

from .base import BaseLanguageProcessor
from .helpers import child_text


class VbTwoProcessor(BaseLanguageProcessor):
def matches_content(self, content: Element, first_line: str, name: str) -> bool:
@@ -22,21 +24,22 @@ def from_xml(xml: Element, report_builder_session: ReportBuilderSession) -> None
files: dict[str, ReportFile] = {}
for source in xml.iter("SourceFileNames"):
_file = report_builder_session.create_coverage_file(
source.find("SourceFileName").text.replace("\\", "/")
child_text(source, "SourceFileName").replace("\\", "/")
)
if _file is not None:
files[source.find("SourceFileID").text] = _file
files[child_text(source, "SourceFileID")] = _file

Codecov warning (codecov/patch) on services/report/languages/vb2.py#L30: added line was not covered by tests.

for line in xml.iter("Lines"):
_file = files.get(line.find("SourceFileID").text)
_file = files.get(child_text(line, "SourceFileID"))
if _file is None:
continue

# 0 == hit, 1 == partial, 2 == miss
cov = line.find("Coverage").text
cov = 1 if cov == "0" else 0 if cov == "2" else True
cov_txt = child_text(line, "Coverage")
cov = 1 if cov_txt == "0" else 0 if cov_txt == "2" else True

Codecov warning (codecov/patch) on services/report/languages/vb2.py#L38-L39: added lines were not covered by tests.
for ln in range(
int(line.find("LnStart").text), int(line.find("LnEnd").text) + 1
int(child_text(line, "LnStart")),
int(child_text(line, "LnEnd")) + 1,
):
_file.append(
ln,
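
A small sketch of the three-way coverage encoding used by the vb and vb2 processors above, where a partially covered range ends up as the value `True`; the helper name is made up for illustration.

```python
def vb2_line_coverage(coverage_text: str) -> int | bool:
    # Per the comment in vb2.py: "0" == hit, "1" == partial, "2" == miss.
    return 1 if coverage_text == "0" else 0 if coverage_text == "2" else True

print(vb2_line_coverage("0"))  # 1    -> hit
print(vb2_line_coverage("2"))  # 0    -> miss
print(vb2_line_coverage("1"))  # True -> partial
```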
