diff --git a/bugbug/model.py b/bugbug/model.py
index a004bf5d55..2c72a2bc94 100644
--- a/bugbug/model.py
+++ b/bugbug/model.py
@@ -189,7 +189,7 @@ def get_human_readable_feature_names(self):
             elif type_ == "text":
                 feature_name = f"Combined text contains '{feature_name}'"
             elif type_ not in ("data", "couple_data"):
-                raise Exception(f"Unexpected feature type for: {full_feature_name}")
+                raise ValueError(f"Unexpected feature type for: {full_feature_name}")
 
             cleaned_feature_names.append(feature_name)
 
diff --git a/bugbug/models/testselect.py b/bugbug/models/testselect.py
index 0dec44986f..5b3da28f40 100644
--- a/bugbug/models/testselect.py
+++ b/bugbug/models/testselect.py
@@ -81,7 +81,7 @@ def _get_cost(config: str) -> int:
         if all(s in config for s in substrings):
             return cost
 
-    raise Exception(f"Couldn't find cost for {config}")
+    raise ValueError(f"Couldn't find cost for {config}")
 
 
 def _generate_equivalence_sets(
diff --git a/bugbug/repository.py b/bugbug/repository.py
index 8090a8008e..cd176eef71 100644
--- a/bugbug/repository.py
+++ b/bugbug/repository.py
@@ -1536,7 +1536,7 @@ def trigger_pull() -> None:
             raise
 
         if p.returncode != 0:
-            raise Exception(
+            raise RuntimeError(
                 f"Error {p.returncode} when pulling {revision} from {branch}"
             )
 
diff --git a/bugbug/rust_code_analysis_server.py b/bugbug/rust_code_analysis_server.py
index 4d85c986f8..445e662d73 100644
--- a/bugbug/rust_code_analysis_server.py
+++ b/bugbug/rust_code_analysis_server.py
@@ -35,7 +35,7 @@ def __init__(self, thread_num: Optional[int] = None):
             time.sleep(0.35)
 
         self.terminate()
-        raise Exception("Unable to run rust-code-analysis server")
+        raise RuntimeError("Unable to run rust-code-analysis server")
 
     @property
     def base_url(self):
@@ -50,7 +50,7 @@ def start_process(self, thread_num: Optional[int] = None):
                 cmd += ["-j", str(thread_num)]
             self.proc = subprocess.Popen(cmd)
         except FileNotFoundError:
-            raise Exception("rust-code-analysis is required for code analysis")
+            raise RuntimeError("rust-code-analysis is required for code analysis")
 
     def terminate(self):
         if self.proc is not None:
diff --git a/bugbug/test_scheduling.py b/bugbug/test_scheduling.py
index 2e07235e48..4314f0ee16 100644
--- a/bugbug/test_scheduling.py
+++ b/bugbug/test_scheduling.py
@@ -117,6 +117,12 @@
 )
 
 
+class UnexpectedGranularityError(ValueError):
+    def __init__(self, granularity):
+        message = f"Unexpected {granularity} granularity"
+        super().__init__(message)
+
+
 def filter_runnables(
     runnables: tuple[Runnable, ...], all_runnables: Set[Runnable], granularity: str
 ) -> tuple[Any, ...]:
@@ -184,7 +190,7 @@ def rename_runnables(
             for config, group in config_groups
         )
     else:
-        raise Exception(f"Unexpected {granularity} granularity")
+        raise UnexpectedGranularityError(granularity)
 
 
 def get_push_data(
@@ -296,7 +302,7 @@ def get_test_scheduling_history(granularity):
     elif granularity == "config_group":
         test_scheduling_db = TEST_CONFIG_GROUP_SCHEDULING_DB
     else:
-        raise Exception(f"{granularity} granularity unsupported")
+        raise UnexpectedGranularityError(granularity)
 
     for obj in db.read(test_scheduling_db):
         yield obj["revs"], obj["data"]
@@ -310,7 +316,7 @@ def get_past_failures(granularity, readonly):
     elif granularity == "config_group":
         past_failures_db = os.path.join("data", PAST_FAILURES_CONFIG_GROUP_DB)
     else:
-        raise Exception(f"{granularity} granularity unsupported")
+        raise UnexpectedGranularityError(granularity)
 
     return shelve.Shelf(
         LMDBDict(past_failures_db[: -len(".tar.zst")], readonly=readonly),
@@ -325,7 +331,7 @@ def get_failing_together_db_path(granularity: str) -> str:
     elif granularity == "config_group":
         path = FAILING_TOGETHER_CONFIG_GROUP_DB
     else:
-        raise Exception(f"{granularity} granularity unsupported")
+        raise UnexpectedGranularityError(granularity)
 
     return os.path.join("data", path[: -len(".tar.zst")])
 
diff --git a/http_service/tests/test_integration.py b/http_service/tests/test_integration.py
index 4076051fee..73f891307b 100644
--- a/http_service/tests/test_integration.py
+++ b/http_service/tests/test_integration.py
@@ -28,7 +28,10 @@ def integration_test_single():
     response_json = response.json()
 
     if not response.ok:
-        raise Exception(f"Couldn't get an answer in {timeout} seconds: {response_json}")
+        raise requests.HTTPError(
+            f"Couldn't get an answer in {timeout} seconds: {response_json}",
+            response=response,
+        )
 
     print("Response for bug 1376406", response_json)
     assert response_json["class"] is not None
@@ -52,7 +55,10 @@ def integration_test_batch():
     response_json = response.json()
 
     if not response.ok:
-        raise Exception(f"Couldn't get an answer in {timeout} seconds: {response_json}")
+        raise requests.HTTPError(
+            f"Couldn't get an answer in {timeout} seconds: {response_json}",
+            response=response,
+        )
 
     response_1376544 = response_json["bugs"]["1376544"]
     print("Response for bug 1376544", response_1376544)
diff --git a/infra/version_check.py b/infra/version_check.py
index fab2f84c36..287a02ea6c 100644
--- a/infra/version_check.py
+++ b/infra/version_check.py
@@ -18,7 +18,7 @@
     print(e.stdout)
     print("stderr:")
     print(e.stderr)
-    raise Exception("Failure while getting latest tag")
+    raise RuntimeError("Failure while getting latest tag")
 
 cur_tag = p.stdout.decode("utf-8")[1:].rstrip()
 
diff --git a/scripts/commit_classifier.py b/scripts/commit_classifier.py
index a461e18aa9..5c084316b2 100644
--- a/scripts/commit_classifier.py
+++ b/scripts/commit_classifier.py
@@ -329,7 +329,7 @@ def load_user(phid):
                 # TODO: Support group reviewers somehow.
                 logger.info(f"Skipping group reviewer {phid}")
             else:
-                raise Exception(f"Unsupported reviewer {phid}")
+                raise ValueError(f"Unsupported reviewer {phid}")
 
         for patch in needed_stack:
             revision = revisions[patch.phid]