Skip to content

Commit

Permalink
Move benchmark tests to a dedicated folder, like smoke.
Browse files Browse the repository at this point in the history
Also more cleanly separate out ops and ops-scenario, to be consistent with what we do elsewhere. There are no ops benchmark tests yet but put the bits in place to add them, since we will want to have some at some point.
  • Loading branch information
tonyandrewmeyer committed Dec 18, 2024
1 parent 82ca4e2 commit b362f13
Show file tree
Hide file tree
Showing 4 changed files with 79 additions and 32 deletions.
24 changes: 24 additions & 0 deletions test/benchmark/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
# Copyright 2024 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Benchmark tests for ops.
Optimising performance is not a current goal with ops - any gains are
unlikely to be significant compared with ones from Juju or the charm and
its workload. However, we do want to ensure that we do not unknowingly
regress in performance.
This package is for tests that cover core functionality, to be used for
performance benchmarking.
"""
22 changes: 22 additions & 0 deletions testing/tests/benchmark/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# Copyright 2024 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Benchmark tests for ops-scenario.
Optimising performance is not a current goal with ops-scenario. However,
we do want to ensure that we do not unknowingly regress in performance.
This package contains a small set of tests that cover core functionality,
to be used for performance benchmarking.
"""
51 changes: 25 additions & 26 deletions test/test_benchmark.py → testing/tests/benchmark/test_testing.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,16 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

"""Benchmark tests for ops and ops-scenario.
Optimising performance is not a current goal with either ops or
ops-scenario - any gains are unlikely to be significant compared with ones
from Juju or the charm and its workload. However, we do want to ensure that
we do not unknowingly regress in performance.
This module contains a small set of tests that cover core functionality,
to be used for performance benchmarking.
"""
"""Benchmark tests for ops-scenario."""

import dataclasses
import pathlib
Expand All @@ -30,15 +21,23 @@
import ops
from ops import testing

sys.path.append(str(pathlib.Path(__file__).parent / 'charms' / 'test_benchmark' / 'src'))
sys.path.append(
str(
pathlib.Path(__file__).parent.parent.parent.parent
/ "test"
/ "charms"
/ "test_benchmark"
/ "src"
)
)

from bcharm import BenchmarkCharm


# Note: the 'benchmark' argument here is a fixture that pytest-benchmark
# automatically makes available to all tests.
def test_context_explicit_meta(benchmark):
ctx = benchmark(testing.Context, ops.CharmBase, meta={'name': 'foo'})
ctx = benchmark(testing.Context, ops.CharmBase, meta={"name": "foo"})
assert isinstance(ctx, testing.Context)


Expand All @@ -58,11 +57,11 @@ def test_context_explicit_meta_config_actions(benchmark):
ctx = benchmark(
testing.Context,
ops.CharmBase,
meta={'name': 'foo'},
actions={'act': {'description': 'foo'}},
config={'options': {'conf': {'type': 'int', 'description': 'bar'}}},
meta={"name": "foo"},
actions={"act": {"description": "foo"}},
config={"options": {"conf": {"type": "int", "description": "bar"}}},
)
ctx.run(ctx.on.action('act'), testing.State(config={'conf': 10}))
ctx.run(ctx.on.action("act"), testing.State(config={"conf": 10}))
assert len({e.handle.kind for e in ctx.emitted_events}) == 1


Expand All @@ -74,9 +73,9 @@ def test_context_autoload_meta(benchmark):
def test_many_tests_explicit_meta(benchmark):
def mock_pytest():
"""Simulate running multiple tests against the same charm."""
for event in ('install', 'start', 'stop', 'remove'):
for event in ("install", "start", "stop", "remove"):
for _ in range(5):
ctx = testing.Context(ops.CharmBase, meta={'name': 'foo'})
ctx = testing.Context(ops.CharmBase, meta={"name": "foo"})
ctx.run(getattr(ctx.on, event)(), testing.State())
assert len({e.handle.kind for e in ctx.emitted_events}) == 1

Expand All @@ -86,7 +85,7 @@ def mock_pytest():
def test_many_tests_autoload_meta(benchmark):
def mock_pytest():
"""Simulate running multiple tests against the same charm."""
for event in ('install', 'start', 'stop', 'remove'):
for event in ("install", "start", "stop", "remove"):
for _ in range(5):
ctx = testing.Context(BenchmarkCharm)
ctx.run(getattr(ctx.on, event)(), testing.State())
Expand All @@ -103,16 +102,16 @@ def test_lots_of_logs(benchmark):

def ditest_full_state(benchmark):
def fill_state():
rel = testing.Relation('rel')
peer = testing.PeerRelation('peer')
network = testing.Network('MySpace')
container = testing.Container('foo')
storage = testing.Storage('bar')
rel = testing.Relation("rel")
peer = testing.PeerRelation("peer")
network = testing.Network("MySpace")
container = testing.Container("foo")
storage = testing.Storage("bar")
tcp = testing.TCPPort(22)
icmp = testing.ICMPPort()
udp = testing.UDPPort(8000)
secret = testing.Secret({'password': 'admin'})
resource = testing.Resource(name='baz', path='.')
secret = testing.Secret({"password": "admin"})
resource = testing.Resource(name="baz", path=".")
stored_state = testing.StoredState()
state = testing.State(
relations={rel, peer},
Expand Down
14 changes: 8 additions & 6 deletions tox.ini
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@ envlist = lint, static, unit
src_path = ops/
tst_path = test/
all_path = {[vars]src_path} {[vars]tst_path}
testing_src_path = testing/src/scenario/
testing_tst_path = testing/tests/

[testenv]
basepython = python3
Expand Down Expand Up @@ -104,7 +106,8 @@ deps =
-e .
-e testing
commands =
pytest -n auto --ignore={[vars]tst_path}smoke --ignore={[vars]tst_path}test_benchmark.py \
pytest -n auto --ignore={[vars]tst_path}smoke \
--ignore={[vars]tst_path}benchmark --ignore={[vars]testing_tst_path}benchmark \
-v --tb native \
-W 'ignore:Harness is deprecated:PendingDeprecationWarning' {posargs}

Expand All @@ -125,8 +128,9 @@ deps =
-e testing
commands =
mkdir -p .report
coverage run --source={[vars]src_path},testing/src/scenario \
-m pytest --ignore={[vars]tst_path}smoke --ignore={[vars]tst_path}test_benchmark.py \
coverage run --source={[vars]src_path},{[vars]testing_src_path} \
-m pytest --ignore={[vars]tst_path}smoke \
--ignore={[vars]tst_path}benchmark --ignore={[vars]testing_tst_path}benchmark \
-v --tb native \
-W 'ignore:Harness is deprecated:PendingDeprecationWarning' {posargs}
coverage xml -o .report/coverage.xml
Expand All @@ -140,15 +144,13 @@ passenv =
deps =
PyYAML==6.*
websocket-client==1.*
coverage[toml]~=7.0
pytest~=7.2
pytest-benchmark
typing_extensions~=4.2
jsonpatch~=1.33
-e .
-e testing
commands =
pytest --ignore={[vars]tst_path}smoke -v --tb native --benchmark-only {posargs}
pytest -v --tb native {[vars]tst_path}benchmark {[vars]testing_tst_path}benchmark {posargs}

[testenv:pebble]
description = Run real pebble tests
Expand Down

0 comments on commit b362f13

Please sign in to comment.