From eab4caabab7b9bff4f23d5a35cfbc95c4327dc47 Mon Sep 17 00:00:00 2001 From: Einar Date: Sun, 7 Apr 2024 15:04:55 +0200 Subject: [PATCH] Rewrite the client to be async all the way through (#317) * Client implementation * Documentation --- .gitignore | 3 +- Cargo.lock | 164 +- docs/async_client.md | 90 + docs/client.md | 170 +- integration/Cargo.toml | 1 + integration/src/harness.rs | 332 +-- integration/src/tests.rs | 302 +- lib/Cargo.toml | 4 + lib/src/client/builder.rs | 291 +- lib/src/client/callbacks.rs | 161 -- lib/src/client/client.rs | 755 ----- lib/src/client/comms/mod.rs | 9 - lib/src/client/comms/tcp_transport.rs | 621 ---- lib/src/client/comms/transport.rs | 8 - lib/src/client/config.rs | 270 +- lib/src/client/message_queue.rs | 144 - lib/src/client/mod.rs | 127 +- lib/src/client/retry.rs | 137 + lib/src/client/session/client.rs | 779 +++++ lib/src/client/session/connect.rs | 111 + lib/src/client/session/event_loop.rs | 333 +++ lib/src/client/session/mod.rs | 102 +- lib/src/client/session/services.rs | 914 ------ lib/src/client/session/services/attributes.rs | 297 ++ lib/src/client/session/services/method.rs | 100 + lib/src/client/session/services/mod.rs | 6 + .../session/services/node_management.rs | 154 + lib/src/client/session/services/session.rs | 370 +++ .../services/subscriptions/event_loop.rs | 169 ++ .../session/services/subscriptions/mod.rs | 451 +++ .../session/services/subscriptions/service.rs | 984 +++++++ .../services/subscriptions/state.rs} | 136 +- lib/src/client/session/services/view.rs | 232 ++ lib/src/client/session/session.rs | 2502 +---------------- lib/src/client/session/session_state.rs | 602 ---- lib/src/client/session_retry_policy.rs | 209 -- lib/src/client/subscription.rs | 389 --- lib/src/client/tests/mod.rs | 161 -- lib/src/client/transport/buffer.rs | 368 +++ lib/src/client/transport/channel.rs | 269 ++ lib/src/client/transport/core.rs | 308 ++ lib/src/client/transport/mod.rs | 9 + lib/src/client/transport/state.rs | 215 ++ 
lib/src/client/transport/tcp.rs | 277 ++ lib/src/core/comms/secure_channel.rs | 3 +- lib/src/core/handle.rs | 77 +- lib/src/lib.rs | 3 - lib/src/server/discovery/mod.rs | 23 +- lib/src/server/server.rs | 40 +- lib/src/server/services/message_handler.rs | 4 +- lib/src/types/encoding.rs | 118 +- lib/src/types/mod.rs | 2 +- lib/src/types/qualified_name.rs | 6 + lib/src/types/service_types/read_value_id.rs | 2 +- lib/src/types/tests/encoding.rs | 45 +- samples/client.conf | 25 +- samples/discovery-client/Cargo.toml | 1 + samples/discovery-client/src/main.rs | 26 +- samples/event-client/Cargo.toml | 1 + samples/event-client/src/main.rs | 122 +- samples/mqtt-client/Cargo.toml | 1 + samples/mqtt-client/src/main.rs | 162 +- samples/simple-client/Cargo.toml | 1 + samples/simple-client/src/main.rs | 117 +- samples/web-client/Cargo.toml | 1 + samples/web-client/src/main.rs | 242 +- 66 files changed, 7454 insertions(+), 7604 deletions(-) create mode 100644 docs/async_client.md delete mode 100644 lib/src/client/callbacks.rs delete mode 100644 lib/src/client/client.rs delete mode 100644 lib/src/client/comms/mod.rs delete mode 100644 lib/src/client/comms/tcp_transport.rs delete mode 100644 lib/src/client/comms/transport.rs delete mode 100644 lib/src/client/message_queue.rs create mode 100644 lib/src/client/retry.rs create mode 100644 lib/src/client/session/client.rs create mode 100644 lib/src/client/session/connect.rs create mode 100644 lib/src/client/session/event_loop.rs delete mode 100644 lib/src/client/session/services.rs create mode 100644 lib/src/client/session/services/attributes.rs create mode 100644 lib/src/client/session/services/method.rs create mode 100644 lib/src/client/session/services/mod.rs create mode 100644 lib/src/client/session/services/node_management.rs create mode 100644 lib/src/client/session/services/session.rs create mode 100644 lib/src/client/session/services/subscriptions/event_loop.rs create mode 100644 
lib/src/client/session/services/subscriptions/mod.rs create mode 100644 lib/src/client/session/services/subscriptions/service.rs rename lib/src/client/{subscription_state.rs => session/services/subscriptions/state.rs} (56%) create mode 100644 lib/src/client/session/services/view.rs delete mode 100644 lib/src/client/session/session_state.rs delete mode 100644 lib/src/client/session_retry_policy.rs delete mode 100644 lib/src/client/subscription.rs delete mode 100644 lib/src/client/tests/mod.rs create mode 100644 lib/src/client/transport/buffer.rs create mode 100644 lib/src/client/transport/channel.rs create mode 100644 lib/src/client/transport/core.rs create mode 100644 lib/src/client/transport/mod.rs create mode 100644 lib/src/client/transport/state.rs create mode 100644 lib/src/client/transport/tcp.rs diff --git a/.gitignore b/.gitignore index 288b089a5..9cb61f483 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,5 @@ log/ /samples/server.test.conf /integration/pki-client /integration/pki-server -3rd-party/open62541/build/ \ No newline at end of file +3rd-party/open62541/build/ +lib/pki* \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 58831c740..a4c7a1282 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -38,8 +38,8 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" dependencies = [ - "bitflags 2.4.2", - "bytes 1.5.0", + "bitflags 2.5.0", + "bytes 1.6.0", "futures-core", "futures-sink", "memchr", @@ -59,8 +59,8 @@ dependencies = [ "actix-service", "actix-utils", "actix-web 4.5.1", - "bitflags 2.4.2", - "bytes 1.5.0", + "bitflags 2.5.0", + "bytes 1.6.0", "derive_more", "futures-core", "http-range", @@ -84,9 +84,9 @@ dependencies = [ "actix-utils", "ahash", "base64 0.21.7", - "bitflags 2.4.2", + "bitflags 2.5.0", "brotli", - "bytes 1.5.0", + "bytes 1.6.0", "bytestring", "derive_more", "encoding_rs", @@ -96,7 +96,7 @@ dependencies = [ 
"http 0.2.12", "httparse", "httpdate", - "itoa 1.0.10", + "itoa 1.0.11", "language-tags 0.3.2", "local-channel", "mime", @@ -104,7 +104,7 @@ dependencies = [ "pin-project-lite", "rand 0.8.5", "sha1 0.10.6", - "smallvec 1.13.1", + "smallvec 1.13.2", "tokio 1.36.0", "tokio-util", "tracing", @@ -118,7 +118,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" dependencies = [ "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -276,7 +276,7 @@ dependencies = [ "actix-utils", "actix-web-codegen", "ahash", - "bytes 1.5.0", + "bytes 1.6.0", "bytestring", "cfg-if 1.0.0", "cookie 0.16.2", @@ -284,7 +284,7 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "itoa 1.0.10", + "itoa 1.0.11", "language-tags 0.3.2", "log 0.4.21", "mime", @@ -294,7 +294,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded 0.7.1", - "smallvec 1.13.1", + "smallvec 1.13.2", "socket2 0.5.6", "time 0.3.34", "url 2.5.0", @@ -309,7 +309,7 @@ dependencies = [ "actix-router", "proc-macro2 1.0.79", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -407,9 +407,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -461,9 +461,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b3d0060af21e8d11a926981cc00c6c1541aa91dd64b9f881985c3da1094425f" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "autocfg" @@ -471,20 +471,20 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "0dde43e75fd43e8a1bf86103336bc699aa8d17ad1be60c76c0bdfd4828e19b78" dependencies = [ - "autocfg 1.1.0", + "autocfg 1.2.0", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", @@ -524,9 +524,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" [[package]] name = "block-buffer" @@ -611,9 +611,9 @@ dependencies = [ [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "bytestring" @@ -621,7 +621,7 @@ version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74d80203ea6b29df88012294f62733de21cfeab47f17b41af3a38bc30a03ee72" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", ] [[package]] @@ -778,7 +778,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" dependencies = [ - "autocfg 1.1.0", + "autocfg 
1.2.0", "cfg-if 0.1.10", "crossbeam-utils 0.7.2", "lazy_static", @@ -814,7 +814,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" dependencies = [ - "autocfg 1.1.0", + "autocfg 1.2.0", "cfg-if 0.1.10", "lazy_static", ] @@ -876,7 +876,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2 1.0.79", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -1191,7 +1191,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2 1.0.79", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -1306,13 +1306,13 @@ version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fbd2820c5e49886948654ab546d0688ff24530286bdcf8fca3cefb16d4618eb" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "fnv", "futures-core", "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.2.5", + "indexmap 2.2.6", "slab", "tokio 1.36.0", "tokio-util", @@ -1385,9 +1385,9 @@ version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "fnv", - "itoa 1.0.10", + "itoa 1.0.11", ] [[package]] @@ -1464,15 +1464,15 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ - "autocfg 1.1.0", + "autocfg 1.2.0", "hashbrown 0.12.3", ] [[package]] name = "indexmap" -version = "2.2.5" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", 
"hashbrown 0.14.3", @@ -1519,9 +1519,9 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" @@ -1629,7 +1629,7 @@ version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" dependencies = [ - "autocfg 1.1.0", + "autocfg 1.2.0", "scopeguard 1.2.0", ] @@ -1724,7 +1724,7 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" dependencies = [ - "autocfg 1.1.0", + "autocfg 1.2.0", ] [[package]] @@ -1839,7 +1839,7 @@ version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ - "autocfg 1.1.0", + "autocfg 1.2.0", ] [[package]] @@ -1880,10 +1880,11 @@ dependencies = [ "actix-files", "actix-web 4.5.1", "arbitrary", + "arc-swap", "base64 0.21.7", - "bitflags 2.4.2", + "bitflags 2.5.0", "byteorder", - "bytes 1.5.0", + "bytes 1.6.0", "chrono", "derivative", "env_logger", @@ -1893,6 +1894,7 @@ dependencies = [ "lazy_static", "libc", "log 0.4.21", + "opcua", "openssl", "openssl-sys", "parking_lot 0.12.1", @@ -1906,7 +1908,7 @@ dependencies = [ "tokio 1.36.0", "tokio-util", "url 1.7.2", - "uuid 1.7.0", + "uuid 1.8.0", ] [[package]] @@ -1945,6 +1947,7 @@ version = "0.13.0" dependencies = [ "opcua", "pico-args", + "tokio 1.36.0", ] [[package]] @@ -1953,6 +1956,7 @@ version = "0.13.0" dependencies = [ "opcua", "pico-args", + "tokio 1.36.0", ] [[package]] @@ -1962,6 +1966,7 @@ dependencies = [ "chrono", "log 0.4.21", 
"opcua", + "tokio 1.36.0", ] [[package]] @@ -1971,6 +1976,7 @@ dependencies = [ "opcua", "pico-args", "rumqttc", + "tokio 1.36.0", ] [[package]] @@ -1979,6 +1985,7 @@ version = "0.13.0" dependencies = [ "opcua", "pico-args", + "tokio 1.36.0", ] [[package]] @@ -1996,6 +2003,7 @@ version = "0.13.0" dependencies = [ "actix", "actix-web 0.7.19", + "futures-util", "opcua", "pico-args", "serde", @@ -2010,7 +2018,7 @@ version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if 1.0.0", "foreign-types", "libc", @@ -2027,7 +2035,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2 1.0.79", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -2144,7 +2152,7 @@ dependencies = [ "cfg-if 1.0.0", "libc", "redox_syscall 0.4.1", - "smallvec 1.13.1", + "smallvec 1.13.2", "windows-targets 0.48.5", ] @@ -2484,9 +2492,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.3" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", @@ -2507,9 +2515,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "remove_dir_all" @@ -2551,7 +2559,7 @@ version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d8941c6791801b667d52bfe9ff4fc7c968d4f3f9ae8ae7abdaaa1c966feafc8" dependencies = [ - "bytes 1.5.0", + 
"bytes 1.6.0", "flume", "futures-util", "log 0.4.21", @@ -2738,16 +2746,16 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2 1.0.79", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" dependencies = [ - "itoa 1.0.10", + "itoa 1.0.11", "ryu", "serde", ] @@ -2771,19 +2779,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.10", + "itoa 1.0.11", "ryu", "serde", ] [[package]] name = "serde_yaml" -version = "0.9.33" +version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0623d197252096520c6f2a5e1171ee436e5af99a5d7caa2891e55e61950e6d9" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.2.5", - "itoa 1.0.10", + "indexmap 2.2.6", + "itoa 1.0.11", "ryu", "serde", "unsafe-libyaml", @@ -2843,7 +2851,7 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ - "autocfg 1.1.0", + "autocfg 1.2.0", ] [[package]] @@ -2857,9 +2865,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" @@ -2936,9 +2944,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.53" 
+version = "2.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7383cd0e49fff4b6b90ca5670bfd3e9d6a733b3f90c686605aa7eec8c4996032" +checksum = "002a1b3dbf967edfafc32655d0f377ab0bb7b994aa1d32c8cc7e9b8bf3ebb8f0" dependencies = [ "proc-macro2 1.0.79", "quote 1.0.35", @@ -2993,7 +3001,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2 1.0.79", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -3024,7 +3032,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" dependencies = [ "deranged", - "itoa 1.0.10", + "itoa 1.0.11", "num-conv", "powerfmt", "serde", @@ -3094,7 +3102,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", - "bytes 1.5.0", + "bytes 1.6.0", "libc", "mio 0.8.11", "num_cpus", @@ -3167,7 +3175,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2 1.0.79", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] @@ -3308,7 +3316,7 @@ version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ - "bytes 1.5.0", + "bytes 1.6.0", "futures-core", "futures-sink", "pin-project-lite", @@ -3543,9 +3551,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ "getrandom 0.2.12", ] @@ -3644,7 +3652,7 @@ dependencies = [ "once_cell", "proc-macro2 1.0.79", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.55", 
"wasm-bindgen-shared", ] @@ -3666,7 +3674,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2 1.0.79", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.55", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3903,7 +3911,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2 1.0.79", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.55", ] [[package]] diff --git a/docs/async_client.md b/docs/async_client.md new file mode 100644 index 000000000..dab54afbe --- /dev/null +++ b/docs/async_client.md @@ -0,0 +1,90 @@ +# Async Client + +The client has been rewritten from a synchronous API to an asynchronous API. This came in the form of a rather large patch. This document lays out the why and what of that change. + +## Why + +The client was already fundamentally built upon tokio, and the core of the client was async. The rewrite started as a smaller project intended to simply change the existing implementation to allow that to bubble up. This has been attempted in the past. However, it quickly became clear that the existing implementation was hard to change in that way, and it seemed likely that a rewrite would not really produce a better quality library, but rather make it even more complex and fragile. + +### Locking + +In particular, the library did (and still does, to a slightly lesser degree) contain a lot synchronous locks. This has historically lead to several bugs, as documented by the extensive use of tracing macros to debug deadlocks. A PR #146 made an effort to switch from std::sync::Mutex/RwLock to parking_lot, but this may have made the problem _worse_. + +In order to produce a deadlock, you _must_ hold two forms of synchronization mechanisms at the same time. The simplest way to make deadlock-free code is to forbid this, which is often easier said than done. The code as is did avoid deadlocks for the most part, but _await points are synchronization_. 
By allowing locks to be held across await points, channel uses, or thread joins, there is an implicit risk of deadlocks. + +This rewrite does still use locks, it is largely unavoidable. More locks could have been removed, but that would have required rewriting the server as well. It does, however, attempt to replace locks in a few ways. + + - Assign clearer ownership of components. For example, the transport layer now owns the message buffer. + - Use more specialized synchronization mechanisms. The `Handle` implementation now uses atomics, as does the `DepthGauge`. A few types have been swapped for `ArcSwap`, which while still a form of locking, is less intrusive than an `RwLock` or `Mutex` for things that are written rarely, read from often, and never mutated except for being replaced, like channel and session IDs. + +There is still a lot more to do here, but this is hopefully a decent start. + +### Weight + +While less obviously a goal, this rewrite had a few major requirements: + + - The API should be entirely async. + - The client should never spawn a thread, unless explicitly asked to by the user. + - The user must be able to apply backpressure to the client. + +Async rust is uniquely well suited to these requirements. Implementing this allows us to create a very lightweight client, but this necessitated further changes to the API. + +Everything is now driven by a set of event loops wrapped in a "poll" API. This means that it is fully possible to run the client entirely cooperatively on a single OS thread, without any tokio tasks. Alternatively, users can quite easily just spawn the event loop on a tokio task. This design is inspired by the wonderful rust MQTT library rumqttc. + +This does have a few downsides. + + - There is no single method to connect to the server, though one could be written. You have to use `wait_for_connection` after spawning the event loop. 
Having the event loop own the connection was essential in order to avoid excessive locking and complexity in the client. + - There is no way to have a session object with just a secure channel and no active session. This simplifies the session a bit (it now has just two real states, connected and not connected), but it makes the code for making calls to a server without a session a bit hairy, see `client.rs`. + - The transport event loop shares a single thread with everything else the session does. If this ever becomes a problem it is very possible to make the session optionally run the transport in an internal task. + +## What + +### Functional changes + +As mentioned the API is now entirely async. Originally the plan was to keep the existing synchronous API in place. It would be relatively trivial to write one, but at the same time it is somewhat un-rusty to create such an API. If a user wants to interface with the client from blocking code, they should have to make that decision explicitly. Writing one would just be a matter of creating a client containing the session and a tokio runtime, then calling `runtime.block_on(...)` for each service. + +The old client, by accident or on purpose, did not support pipelining requests, due to `send_request` requiring a mutable reference to the session state. The new client does, by lowering the message queue to the transport layer, and by eliminating the `SessionState` entirely, instead keeping multiple independent pieces of state in the client or the event loop. + +The service traits are gone. They were entirely unused, except as a way to group services, and potentially as a way for users to have mocking? Keeping them would have been possible, but it would have required a higher MSRV. + +Callbacks have changed slightly, mostly due to the way notifications are received. 
The subscription logic is similar, but the way `MonitoredItems` queued values seemed largely meaningless, as they were always immediately dequeued (or if they weren't that would be a user error?), it seemed like a feature that was mostly unhelpful. The connect/disconnect callbacks are also gone, though they could be reimplemented. It is possible to monitor the state of the session by watching the output of the event loop `poll` method, see the `web-client` sample for an example of how this could be done. + +The client is no longer behind a lock. Any locking is internal, except for access to the subscription cache directly. This should be a strictly positive change for usability. + +### Removal of the prelude + +Big glob-imports are generally discouraged, and are apparently bad for compile time. In general, a user who wants to glob-import everything can do so through `types::*` and `client::*`. Being explicit about what we expose is good for semver compatibility, and `prelude` should be reserved for imports such as core traits and essential types. Ideally we would remove this from the server as well, but this patch aimed to be constrained to the client as much as possible. + +### Changes outside of the client + +A few changes were made outside of the client, though they are very limited in scope. + + - Use atomics in the `Handle` and `DepthGauge` implementations. + - Use the async client in the server. This was actually quite complicated, since that code did a _lot_ of locking, taking three mutexes at the same time, which almost immediately blocked and deadlocked. The implementation now avoids that, but is perhaps a bit more clone-happy. + - The integration tests of course had to be rewritten. They are now enabled by default, since it turns out they now run in about a second total. If the server was rewritten to async as well they could probably run in a few hundred milliseconds at most. + - The samples needed to be rewritten as well. 
+ - A pair of default implementations was added for `ReadValueId` and `QualifiedName`, just for convenience. `QualifiedName` has `null` which is a very reasonable default. + +### Remaining gaps + +There are a few things that were deemed out of scope for the initial implementation. Implementing these need not be complicated, but they are also not strictly necessary for a fully featured async client. + + - Wrapper methods for creating a session and connecting. This could be useful, and is likely fairly easy to do. We would create a session, temporarily poll the event loop, and connect to the client. These methods would have to return an `impl Stream`, or a `JoinHandle`, since we would need to start the event loop to connect. + - A wrapper around the event loop to provide a better interface for monitoring the connection. + - Better control over subscription transfers. Currently it is a bit magical. Calling `transfer_subscriptions` is fine, but actually recreating them is a bit aggressive. Especially if the user has tens or hundreds of thousands of monitored items. This change adds chunking to that process, but this is a bit of a stopgap measure. + - Utility methods for retries on service calls. + - Mechanisms for handling session loss without an actual lost TCP connection. This is partially covered in the old sync client, so there is a clear gap here. It requires some careful investigation however, to determine the best way to deal with this. + - Discovery endpoints on the session. They are currently only available on the client itself, which spins up a new connection. Technically they are available on sessions as well. + +## Future projects + +This is a large patch, hopefully without too many bugs, though it is hard to say for sure. While it is huge, it is constrained to the client for the most part. In the process of writing this, a few issues came up that make for nice future projects: + + - Rewrite `StatusCode` by hand. 
The current implementation uses `bitflags` but this works poorly (the debug output is very misleading), since `StatusCode`s in OPC-UA really aren't bit flags in that sense. A manual implementation is a moderate amount of work, but would probably be helpful. + - Rewrite the server. This is probably an even larger project than the client, having it be async all the way through would be very helpful as well. + - Use more sophisticated errors than just `StatusCode`. This is a problem with other OPC-UA implementations as well, it can be hard to tell whether an error comes from the server or the client, or any other details in general. The session logs when errors are encountered, but logs scale poorly and cannot be handled programmatically. While every error should _have_ a status code, there is no reason why we couldn't use normal rust errors with custom `status_code` methods and the option for more debug info. + - Look into using pure rust crypto libraries as an alternative. Rust crypto has come a long way, and avoiding external libraries can improve the portability of the code. + - Make the transport layer generic to allow for other transports. + - Expand on the session with more utility methods for making more sophisticated requests to OPC-UA servers. Continuation point handling, server limits, events, etc. + - Fix all the warnings on recent rust versions. There are a few deprecated methods, and a few warnings related to glob imports mentioned above. + diff --git a/docs/client.md b/docs/client.md index 956b40435..fbc11369e 100644 --- a/docs/client.md +++ b/docs/client.md @@ -34,9 +34,11 @@ If you want to see a finished version of this, look at `opcua/samples/simple-cli From a coding perspective a typical use would be this: 1. Create a `Client`. The easiest way is with a `ClientBuilder`. -2. Call the client to connect to a server endpoint and create a `Session` -3. Call functions on the session which make requests to the server, e.g. read a value, or monitor items -4. 
Run in a loop doing 3 repeatedly or exit +2. Create a `Session` and `SessionEventLoop` from a server endpoint. +3. Begin polling the event loop, either in a tokio `Task` or in a `select!` block. +4. Wait for the event loop to connect to the server. +5. Call functions on the session which make requests to the server, e.g. read a value, or monitor items +6. Run in a loop doing 5 repeatedly or exit Most of the housekeeping and detail is handled by the API. You just need to point the client at the server, and set things up before calling stuff on the session. @@ -51,27 +53,15 @@ cargo init --bin test-client ## Import the crate -The `opcua-client` is the crate containing the client side API. So first edit your `Cargo.toml` to -add that dependency: +The `opcua` is the crate containing the client side API. So first edit your `Cargo.toml` to +add that dependency. You will also need `tokio`: ```toml [dependencies] -opcua = { version = "0.12", features = ["client"] } +opcua = { version = "0.14", features = ["client"] } +tokio = { version = "1", features = ["full"] } ``` -## Import types - -OPC UA has a *lot* of types and structures and the client has structs representing the client, -session and open connection. - -To pull these in, add this to the top of your `main.rs`: - -```rust -use opcua::client::prelude::*; -``` - -The `prelude` module contains all of the things a basic client needs. - ## Create your client The `Client` object represents a configured client describing its identity and set of behaviours. @@ -184,7 +174,8 @@ A `Client` can connect to any server it likes. There are a number of ways to do We'll go ad hoc. So in your client code you will have some code like this. ```rust -fn main() { +#[tokio::main] +async fn main() { //... create Client // Create an endpoint. 
The EndpointDescription can be made from a tuple consisting of @@ -197,7 +188,18 @@ fn main() { ).into(); // Create the session - let session = client.connect_to_endpoint(endpoint, IdentityToken::Anonymous).unwrap(); + let (session, event_loop) = client.new_session_from_endpoint(endpoint, IdentityToken::Anonymous).await.unwrap(); + + // Spawn the event loop on a tokio task. + let mut handle = event_loop.spawn(); + tokio::select! { + r = &mut handle => { + println!("Session failed to connect! {r}"); + return; + } + _ = session.wait_for_connection().await => {} + } + //... use session } @@ -206,76 +208,21 @@ fn main() { This command asks the API to connect to the server `opc.tcp://localhost:4855/` with a security policy / message mode of None / None, and to connect as an anonymous user. -Assuming the connect success and returns `Ok(session)` then we now have a session to the server. - -Note you will always get a `session` even if activation failed, i.e. if your identity token was -invalid for the endpoint your connection will be open but every call will fail with a `StatusCode::BadSessionNotActivated` -service fault until you call `activate_session()` successfully. - -## Using the Session object - -Note that the client returns sessions wrapped as a `Arc>`. The `Session` is locked because -the code shares it with the OPC UA for Rust internals. - -That means to use a session you must lock it to obtain read or write access to it. e.g, - -```rust -// Obtain a read-write lock to the session -let session = session.write().unwrap(); -// call it. -``` - -Since you share the Session with the internals, you MUST relinquish the lock in a timely fashion. i.e. -you should never lock it open at the session start because OPC UA will never be able to obtain it and will -break. - -#### Avoiding deadlock - -You MUST release any lock before invoking `Session::run(session)` or the client will deadlock - the -run loop will be waiting for the lock that will never release. 
- -Therefore avoid this code: +Note that this does not connect to the server, only identify a server endpoint to connect to and create the necessary types to manage that connection. -```rust -let s = session.write().unwrap(); -// ... create some subscriptions, monitored items -// DANGER. Session is still locked on this thread and will deadlock. -let _ = Session::run(session); -``` +The `event_loop` is responsible for maintaining the connection to the server. We run it in a background thread for convenience. In this case, if the event loop terminates, we have failed to connect to the server, even after retries. -Use a scope or a function to release the lock before you hit `Session::run(session)`: +In order to avoid waiting forever on a connection, we watch the handle in a `select!`. -```rust -{ - let mut session = session.write().unwrap(); - // ... create some subscriptions, monitored items -} -let _ = Session::run(session); -``` +Once `wait_for_connection` returns, if the event loop has not terminated, we have an open and activated session. ## Calling the server Once we have a session we can ask the server to do things by sending requests to it. Requests correspond to services implemented by the server. Each request is answered by a response containing the answer, or a service fault if the -service is in error. - -First a word about synchronous and asynchronous calls. - -### Synchronous calls - -The OPC UA for Rust client API is _mostly_ synchronous by design. i.e. when you call the a function, the request will be -sent to the server and the call will block until the response is received or the call times out. - -This makes the client API easy to use. +service is in error. -### Asynchronous calls - -Under the covers, all calls are asynchronous. Requests are dispatched and responses are handled asynchronously -but the client waits for the response it is expecting or for the call to timeout. 
- -The only exception to this are publish requests and responses which are always asynchronous. These are handled -internally by the API from timers. If a publish response contains changes from a subscription, the subscription's -registered callback will be called asynchronously from another thread. +The API is asynchronous, and only requires a shared reference to the session. This means that you can make multiple independent requests concurrently. The session only keeps a single connection to the server, but OPC-UA supports _pipelining_ meaning that you can send several requests to the server at the same time, then receive them out of order. ### Calling a service @@ -288,63 +235,42 @@ Here is code that creates a subscription and adds a monitored item to the subscr ```rust { - let mut session = session.write().unwrap(); - let subscription_id = session.create_subscription(2000.0, 10, 30, 0, 0, true, DataChangeCallback::new(|changed_monitored_items| { + let subscription_id = session.create_subscription(std::time::Duration::from_millis(2000), 10, 30, 0, 0, true, DataChangeCallback::new(|changed_monitored_items| { println!("Data change from server:"); changed_monitored_items.iter().for_each(|item| print_value(item)); - }))?; + })).await?; // Create some monitored items let items_to_create: Vec = ["v1", "v2", "v3", "v4"].iter() .map(|v| NodeId::new(2, *v).into()).collect(); - let _ = session.create_monitored_items(subscription_id, TimestampsToReturn::Both, &items_to_create); + let _ = session.create_monitored_items(subscription_id, TimestampsToReturn::Both, items_to_create).await?; } ``` Note the call to `create_subscription()` requires an implementation of a callback. There is a `DataChangeCallback` -helper for this purpose that calls your function with any changed items. - -## Running a loop +helper for this purpose that calls your function with any changed items, but you can also implement it yourself for more complex use cases. 
-
-You may want to run continuously after you've created a session. There are two ways to do this depending on what you
-are trying to achieve.
+## Monitoring the event loop
 
-## Session::run
+Using `event_loop.spawn` is convenient if you do not care what the session is doing, but in general you want to know what is happening so that your code can react to it. The `event_loop` _drives_ the entire session including sending and receiving messages, monitoring subscriptions, and establishing and maintaining the connection.
 
-If all you did is subscribe to some stuff and you have no further work to do then you can just call `Session::run()`.
+You can watch these events yourself by using `event_loop.enter`, which returns a `Stream` of `SessionPollResult` items.
 
 ```rust
-Session::run(session);
-```
-
-This function synchronously runs forever on the thread, blocking until the client sets an abort flag and breaks, or the connection breaks and any retry limit is exceeded.
-## Session::run_async
+tokio::task::spawn(async move {
+    // Using `next` requires the futures_util package.
+    while let Some(evt) = event_loop.next().await {
+        match evt {
+            Ok(SessionPollResult::ConnectionLost(status)) => { /* connection lost */ },
+            Ok(SessionPollResult::Reconnected(mode)) => { /* connection established */ },
+            Ok(SessionPollResult::ReconnectFailed(status)) => { /* connection attempt failed */ },
+            Err(e) => { /* Exhausted connect retries, the stream will exit now */ },
+            _ => { /* Other events */ }
+        }
+    }
+})
 
-If you intend writing your own loop then the session's loop needs to run asynchronously on another thread. In this case you call `Session::async_run()`. When you call it, a new thread is spawned to maintain the session and the calling thread
-is free to do something else. So for example, you could write a polling loop of some kind. The call to `run_async()` returns an `tokio::oneshot::Sender` that allows you to send a message to stop the session running on
-the other thread. 
You must capture that sender returned by the function in a variable or it will drop and the session will -also drop. - -```rust -let session_tx = Session::run_async(session.clone()); -loop { - // My loop - { - // I want to poll a value from OPC UA - let session = session.write().unwrap(); - let value = session.read(....); - //... process value - } - - let some_reason_to_quit() { - // Terminate the session loop - session_tx.send(SessionCommand.stop()); - } - - // Maybe I sleep in my loop because it polls - std::thread::sleep(Duration::from_millis(2000);) -} ``` ## That's it diff --git a/integration/Cargo.toml b/integration/Cargo.toml index 9fed2037d..aa141455b 100644 --- a/integration/Cargo.toml +++ b/integration/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" [dev-dependencies] log = "0.4" chrono = "0.4" +tokio = { version = "1", features = ["full"] } [dev-dependencies.opcua] path = "../lib" diff --git a/integration/src/harness.rs b/integration/src/harness.rs index 2064c43af..34d3bba32 100644 --- a/integration/src/harness.rs +++ b/integration/src/harness.rs @@ -1,19 +1,20 @@ -use std::time::Instant; +use std::future::Future; +use std::time::{Duration, Instant}; use std::{ path::PathBuf, sync::{ atomic::{AtomicUsize, Ordering}, - mpsc, - mpsc::channel, Arc, }, - thread, time, }; +use tokio::select; +use tokio::sync::mpsc; +use tokio::sync::mpsc::unbounded_channel; use log::*; +use opcua::client::{Client, ClientBuilder, IdentityToken}; use opcua::{ - client::prelude::*, runtime_components, server::{ builder::ServerBuilder, callbacks, config::ServerEndpoint, prelude::*, @@ -24,7 +25,7 @@ use opcua::{ use crate::*; -const TEST_TIMEOUT: i64 = 30000; +const TEST_TIMEOUT: u64 = 30000; pub fn functions_object_id() -> NodeId { NodeId::new(2, "Functions") @@ -118,7 +119,7 @@ pub fn new_server(port: u16) -> Server { .application_uri("urn:integration_server") .discovery_urls(vec![endpoint_url(port, endpoint_path).to_string()]) .create_sample_keypair(true) - 
.pki_dir("./pki-server") + .pki_dir(format!("./pki-server/{}", port)) .discovery_server_url(None) .host_and_port(hostname(), port) .user_token(sample_user_id, server_user_token()) @@ -333,19 +334,25 @@ impl callbacks::Method for HelloX { } } -fn new_client(_port: u16) -> Client { - ClientBuilder::new() +fn new_client(port: u16, quick_timeout: bool) -> Client { + let builder = ClientBuilder::new() .application_name("integration_client") .application_uri("x") - .pki_dir("./pki-client") + .pki_dir(format!("./pki-client/{port}")) .create_sample_keypair(true) .trust_server_certs(true) - .client() - .unwrap() + .session_retry_initial(Duration::from_millis(200)); + + let builder = if quick_timeout { + builder.session_retry_limit(1) + } else { + builder + }; + builder.client().unwrap() } -pub fn new_client_server(port: u16) -> (Client, Server) { - (new_client(port), new_server(port)) +pub fn new_client_server(port: u16, quick_timeout: bool) -> (Client, Server) { + (new_client(port, quick_timeout), new_server(port)) } #[derive(Debug, Clone, Copy, PartialEq)] @@ -373,30 +380,34 @@ pub enum ServerResponse { Finished(bool), } -pub fn perform_test( +pub async fn perform_test( client: Client, server: Server, client_test: Option, server_test: ST, ) where - CT: FnOnce(mpsc::Receiver, Client) + Send + 'static, - ST: FnOnce(mpsc::Receiver, Server) + Send + 'static, + CT: FnOnce(mpsc::UnboundedReceiver, Client) -> CFut + Send + 'static, + ST: FnOnce(mpsc::UnboundedReceiver, Server) -> SFut + Send + 'static, + CFut: Future + Send + 'static, + SFut: Future + Send + 'static, { opcua::console_logging::init(); - // Spawn the CLIENT thread - let (client_thread, tx_client_command, rx_client_response) = { + // Spawn the CLIENT future + let (client_fut, tx_client_command, mut rx_client_response) = { + println!("Begin test"); // Create channels for client command and response - let (tx_client_command, rx_client_command) = channel::(); - let (tx_client_response, rx_client_response) = 
channel::(); + let (tx_client_command, mut rx_client_command) = unbounded_channel::(); + let (tx_client_response, rx_client_response) = unbounded_channel::(); - let client_thread = thread::spawn(move || { - info!("Client test thread is running"); + let client_fut = tokio::task::spawn(async move { + println!("Enter client fut"); let result = if let Some(client_test) = client_test { // Wait for start command so we know server is ready - let msg = rx_client_command.recv().unwrap(); - assert_eq!(msg, ClientCommand::Start); + println!("Begin wait for client RX"); + let msg = rx_client_command.recv().await.unwrap(); + assert_eq!(msg, ClientCommand::Start); // Client is ready let _ = tx_client_response.send(ClientResponse::Ready); @@ -405,34 +416,33 @@ pub fn perform_test( let _ = tx_client_response.send(ClientResponse::Starting); - client_test(rx_client_command, client); + println!("Begin client test"); + client_test(rx_client_command, client).await; true } else { trace!("No client test"); true }; - info!( - "Client test has completed, sending ClientResponse::Finished({:?})", - result - ); let _ = tx_client_response.send(ClientResponse::Finished(result)); - info!("Client thread has finished"); }); - (client_thread, tx_client_command, rx_client_response) + (client_fut, tx_client_command, rx_client_response) }; - // Spawn the SERVER thread - let (server_thread, tx_server_command, rx_server_response) = { + // Spawn the SERVER future + let (server_fut, tx_server_command, mut rx_server_response) = { // Create channels for server command and response - let (tx_server_command, rx_server_command) = channel(); - let (tx_server_response, rx_server_response) = channel(); - let server_thread = thread::spawn(move || { - // Server thread + let (tx_server_command, rx_server_command) = unbounded_channel(); + let (tx_server_response, rx_server_response) = unbounded_channel(); + println!("Make server fut"); + let server_fut = tokio::task::spawn(async move { + println!("Begin server"); + 
// Server future info!("Server test thread is running"); let _ = tx_server_response.send(ServerResponse::Starting); let _ = tx_server_response.send(ServerResponse::Ready); - server_test(rx_server_command, server); + println!("Begin server test"); + server_test(rx_server_command, server).await; let result = true; info!( @@ -442,7 +452,7 @@ pub fn perform_test( let _ = tx_server_response.send(ServerResponse::Finished(result)); info!("Server thread has finished"); }); - (server_thread, tx_server_command, rx_server_response) + (server_fut, tx_server_command, rx_server_response) }; let start_time = Instant::now(); @@ -454,76 +464,78 @@ pub fn perform_test( let mut server_has_finished = false; let mut server_success = false; + let end_time = start_time + std::time::Duration::from_millis(timeout); + // Loop until either the client or the server has quit, or the timeout limit is reached while !client_has_finished || !server_has_finished { - // Timeout test - let now = Instant::now(); - let elapsed = now.duration_since(start_time.clone()); - if elapsed.as_millis() > timeout as u128 { - let _ = tx_client_command.send(ClientCommand::Quit); - let _ = tx_server_command.send(ServerCommand::Quit); - - error!("Test timed out after {} ms", elapsed.as_millis()); - error!("Running components:\n {}", { - let components = runtime_components!(); - components - .iter() - .cloned() - .collect::>() - .join("\n ") - }); - - panic!("Timeout"); - } - - // Check for a client response - if let Ok(response) = rx_client_response.try_recv() { - match response { - ClientResponse::Starting => { - info!("Client test is starting"); - } - ClientResponse::Ready => { - info!("Client is ready"); - } - ClientResponse::Finished(success) => { - info!("Client test finished, result = {:?}", success); - client_success = success; - client_has_finished = true; - if !server_has_finished { - info!("Telling the server to quit"); - let _ = tx_server_command.send(ServerCommand::Quit); + select! 
{ + _ = tokio::time::sleep_until(end_time.into()) => { + let _ = tx_client_command.send(ClientCommand::Quit); + let _ = tx_server_command.send(ServerCommand::Quit); + + error!("Test timed out after {} ms", timeout); + error!("Running components:\n {}", { + let components = runtime_components!(); + components + .iter() + .cloned() + .collect::>() + .join("\n ") + }); + + server_success = false; + client_success = false; + + break; + } + response = rx_client_response.recv() => { + match response { + Some(ClientResponse::Starting) => { + info!("Client test is starting"); + } + Some(ClientResponse::Ready) => { + info!("Client is ready"); + } + Some(ClientResponse::Finished(success)) => { + info!("Client test finished, result = {:?}", success); + client_success = success; + client_has_finished = true; + if !server_has_finished { + info!("Telling the server to quit"); + let _ = tx_server_command.send(ServerCommand::Quit); + } + } + None => { } } } - } - - // Check for a server response - if let Ok(response) = rx_server_response.try_recv() { - match response { - ServerResponse::Starting => { - info!("Server test is starting"); - } - ServerResponse::Ready => { - info!("Server test is ready"); - // Tell the client to start - let _ = tx_client_command.send(ClientCommand::Start); - } - ServerResponse::Finished(success) => { - info!("Server test finished, result = {:?}", success); - server_success = success; - server_has_finished = true; + response = rx_server_response.recv() => { + match response { + Some(ServerResponse::Starting) => { + info!("Server test is starting"); + } + Some(ServerResponse::Ready) => { + info!("Server test is ready"); + // Tell the client to start + let _ = tx_client_command.send(ClientCommand::Start); + } + Some(ServerResponse::Finished(success)) => { + info!("Server test finished, result = {:?}", success); + server_success = success; + server_has_finished = true; + } + None => { + } } } } - - thread::sleep(time::Duration::from_millis(1000)); } 
info!("Joining on threads...."); // Threads should exit by now - let _ = client_thread.join(); - let _ = server_thread.join(); + let _ = client_fut.await.unwrap(); + let _ = server_fut.await.unwrap(); assert!(client_success); assert!(server_success); @@ -531,41 +543,46 @@ pub fn perform_test( info!("test complete") } -pub fn get_endpoints_client_test( +pub async fn get_endpoints_client_test( server_url: &str, _identity_token: IdentityToken, - _rx_client_command: mpsc::Receiver, + _rx_client_command: mpsc::UnboundedReceiver, client: Client, ) { - let endpoints = client.get_server_endpoints_from_url(server_url).unwrap(); + let endpoints = client + .get_server_endpoints_from_url(server_url) + .await + .unwrap(); // Value should match number of expected endpoints assert_eq!(endpoints.len(), 11); } -pub fn regular_client_test( - client_endpoint: T, +pub async fn regular_client_test( + client_endpoint: impl Into, identity_token: IdentityToken, - _rx_client_command: mpsc::Receiver, + _rx_client_command: mpsc::UnboundedReceiver, mut client: Client, -) where - T: Into, -{ +) { // Connect to the server let client_endpoint = client_endpoint.into(); info!( "Client will try to connect to endpoint {:?}", client_endpoint ); - let session = client - .connect_to_endpoint(client_endpoint, identity_token) + let (session, event_loop) = client + .new_session_from_endpoint(client_endpoint, identity_token) + .await .unwrap(); - let session = session.read(); + + let handle = event_loop.spawn(); + session.wait_for_connection().await; // Read the variable let mut values = { let read_nodes = vec![ReadValueId::from(v1_node_id())]; session .read(&read_nodes, TimestampsToReturn::Both, 1.0) + .await .unwrap() }; assert_eq!(values.len(), 1); @@ -573,57 +590,34 @@ pub fn regular_client_test( let value = values.remove(0).value; assert_eq!(value, Some(Variant::from(100))); - session.disconnect(); + session.disconnect().await.unwrap(); + handle.await.unwrap(); } -pub fn invalid_session_client_test( - 
client_endpoint: T, +pub async fn invalid_token_test( + client_endpoint: impl Into, identity_token: IdentityToken, - _rx_client_command: mpsc::Receiver, + _rx_client_command: mpsc::UnboundedReceiver, mut client: Client, -) where - T: Into, -{ +) { // Connect to the server let client_endpoint = client_endpoint.into(); info!( "Client will try to connect to endpoint {:?}", client_endpoint ); - let session = client - .connect_to_endpoint(client_endpoint, identity_token) + let (_, event_loop) = client + .new_session_from_endpoint(client_endpoint, identity_token) + .await .unwrap(); - let session = session.read(); - - // Read the variable and expect that to fail - let read_nodes = vec![ReadValueId::from(v1_node_id())]; - let status_code = session - .read(&read_nodes, TimestampsToReturn::Both, 1.0) - .unwrap_err(); - assert_eq!(status_code, StatusCode::BadSessionNotActivated); - - session.disconnect(); -} - -pub fn invalid_token_test( - client_endpoint: T, - identity_token: IdentityToken, - _rx_client_command: mpsc::Receiver, - mut client: Client, -) where - T: Into, -{ - // Connect to the server - let client_endpoint = client_endpoint.into(); - info!( - "Client will try to connect to endpoint {:?}", - client_endpoint - ); - let session = client.connect_to_endpoint(client_endpoint, identity_token); - assert!(session.is_err()); + let res = event_loop.spawn().await.unwrap(); + assert_eq!(res, StatusCode::BadUserAccessDenied); } -pub fn regular_server_test(rx_server_command: mpsc::Receiver, server: Server) { +pub async fn regular_server_test( + mut rx_server_command: mpsc::UnboundedReceiver, + server: Server, +) { trace!("Hello from server"); // Wrap the server - a little juggling is required to give one rc // to a thread while holding onto one. 
@@ -631,14 +625,14 @@ pub fn regular_server_test(rx_server_command: mpsc::Receiver, ser let server2 = server.clone(); // Server runs on its own thread - let t = thread::spawn(move || { + let t = tokio::task::spawn_blocking(move || { Server::run_server(server); info!("Server thread has finished"); }); // Listen for quit command, if we get one then finish loop { - if let Ok(command) = rx_server_command.recv() { + if let Some(command) = rx_server_command.recv().await { match command { ServerCommand::Quit => { // Tell the server to quit @@ -648,7 +642,7 @@ pub fn regular_server_test(rx_server_command: mpsc::Receiver, ser server.abort(); } // wait for server thread to quit - let _ = t.join(); + let _ = t.await.unwrap(); info!("2. ------------------------ Server has now terminated after quit"); break; } @@ -660,50 +654,56 @@ pub fn regular_server_test(rx_server_command: mpsc::Receiver, ser } } -pub fn connect_with_client_test(port: u16, client_test: CT) +pub async fn connect_with_client_test(port: u16, client_test: CT, quick_timeout: bool) where - CT: FnOnce(mpsc::Receiver, Client) + Send + 'static, + CT: FnOnce(mpsc::UnboundedReceiver, Client) -> Fut + Send + 'static, + Fut: Future + Send + 'static, { - let (client, server) = new_client_server(port); - perform_test(client, server, Some(client_test), regular_server_test); + let (client, server) = new_client_server(port, quick_timeout); + perform_test(client, server, Some(client_test), regular_server_test).await; } -pub fn connect_with_get_endpoints(port: u16) { +pub async fn connect_with_get_endpoints(port: u16) { connect_with_client_test( port, - move |rx_client_command: mpsc::Receiver, client: Client| { + move |rx_client_command: mpsc::UnboundedReceiver, client: Client| async move { get_endpoints_client_test( &endpoint_url(port, "/").as_ref(), IdentityToken::Anonymous, rx_client_command, client, - ); + ) + .await; }, - ); + false + ).await; } -pub fn connect_with_invalid_token( +pub async fn connect_with_invalid_token( 
port: u16, client_endpoint: EndpointDescription, identity_token: IdentityToken, ) { connect_with_client_test( port, - move |rx_client_command: mpsc::Receiver, client: Client| { - invalid_token_test(client_endpoint, identity_token, rx_client_command, client); + move |rx_client_command: mpsc::UnboundedReceiver, client: Client| async move { + invalid_token_test(client_endpoint, identity_token, rx_client_command, client).await; }, - ); + true + ) + .await; } -pub fn connect_with( +pub async fn connect_with( port: u16, client_endpoint: EndpointDescription, identity_token: IdentityToken, ) { connect_with_client_test( port, - move |rx_client_command: mpsc::Receiver, client: Client| { - regular_client_test(client_endpoint, identity_token, rx_client_command, client); + move |rx_client_command: mpsc::UnboundedReceiver, client: Client| async move { + regular_client_test(client_endpoint, identity_token, rx_client_command, client).await; }, - ); + false + ).await; } diff --git a/integration/src/tests.rs b/integration/src/tests.rs index 906357b90..43b4a057f 100644 --- a/integration/src/tests.rs +++ b/integration/src/tests.rs @@ -1,15 +1,14 @@ -use std::{ - sync::{mpsc, mpsc::channel, Arc}, - thread, -}; +use std::{sync::Arc, thread}; use chrono::Utc; use log::*; -use opcua::client::prelude::*; +use opcua::client::{Client, DataChangeCallback, IdentityToken}; use opcua::server::prelude::*; use opcua::sync::*; +use tokio::sync::mpsc::{self, unbounded_channel}; + use crate::harness::*; fn endpoint( @@ -121,7 +120,6 @@ fn endpoint_aes256sha256rsapss_sign_encrypt(port: u16) -> EndpointDescription { /// This is the most basic integration test starting the server on a thread, setting an abort flag /// and expecting the test to complete before it times out. 
#[test] -#[ignore] fn server_abort() { opcua::console_logging::init(); @@ -131,7 +129,7 @@ fn server_abort() { // This is pretty lame, but to tell if the thread has terminated or not, there is no try_join // so we will have the thread send a message when it is finishing via a receiver - let (tx, rx) = channel(); + let (tx, mut rx) = unbounded_channel(); let _t = thread::spawn(move || { // This should run & block until it is told to abort Server::run_server(server); @@ -164,11 +162,10 @@ fn server_abort() { /// Start a server, send a HELLO message but then wait for the server /// to timeout and drop the connection. -#[test] -#[ignore] -fn hello_timeout() { - use std::io::Read; - use std::net::TcpStream; +#[tokio::test] +async fn hello_timeout() { + use tokio::io::AsyncReadExt; + use tokio::net::TcpStream; let port = next_port(); // For this test we want to set the hello timeout to a low value for the sake of speed. @@ -177,27 +174,40 @@ fn hello_timeout() { // socket open for longer than the timeout period. The server is expected to close the socket for the // test to pass. - let client_test = move |_rx_client_command: mpsc::Receiver, _client: Client| { + let client_test = move |_rx_client_command: mpsc::UnboundedReceiver, + _client: Client| async move { // Client will open a socket, and sit there waiting for the socket to close, which should happen in under the timeout_wait_duration - let timeout_wait_duration = std::time::Duration::from_secs( - opcua::server::constants::DEFAULT_HELLO_TIMEOUT_SECONDS as u64 + 3, - ); + let timeout_wait_duration = std::time::Duration::from_secs(2); let host = crate::harness::hostname(); let address = (host.as_ref(), port); + let mut c = 0; + // Getting a connection can sometimes take a few tries, since the server reports it is + // ready before it actually is in some cases. 
+ let mut stream = loop { + let stream = TcpStream::connect(address).await; + if let Ok(stream) = stream { + break stream; + } + c += 1; + if c >= 10 { + panic!("Failed to connect to server"); + } + + tokio::time::sleep(std::time::Duration::from_millis(200)).await; + }; debug!("Client is going to connect to port {:?}", address); - let mut stream = TcpStream::connect(address).unwrap(); let mut buf = [0u8]; // Spin around for the timeout to finish and then try using the socket to see if it is still open. let start = std::time::Instant::now(); loop { - thread::sleep(std::time::Duration::from_millis(100)); + tokio::time::sleep(std::time::Duration::from_millis(100)).await; let now = std::time::Instant::now(); if now - start > timeout_wait_duration { debug!("Timeout wait duration has passed, so trying to read from the socket"); - let result = stream.read(&mut buf); + let result = stream.read(&mut buf).await; match result { Ok(v) => { if v > 0 { @@ -223,282 +233,279 @@ fn hello_timeout() { } }; - let (client, server) = new_client_server(port); - perform_test(client, server, Some(client_test), regular_server_test); + let (client, server) = new_client_server(port, false); + perform_test(client, server, Some(client_test), regular_server_test).await; } /// Start a server, fetch a list of endpoints, verify they are correct -#[test] -#[ignore] -fn get_endpoints() { +#[tokio::test] +async fn get_endpoints() { + println!("Enter test"); // Connect to server and get a list of endpoints - connect_with_get_endpoints(next_port()); + connect_with_get_endpoints(next_port()).await; } /// Connect to the server using no encryption, anonymous -#[test] -#[ignore] -fn connect_none() { +#[tokio::test] +async fn connect_none() { // Connect a session using None security policy and anonymous token. 
let port = next_port(); - connect_with(port, endpoint_none(port), IdentityToken::Anonymous); + connect_with(port, endpoint_none(port), IdentityToken::Anonymous).await; } /// Connect to the server using Basic128Rsa15 + Sign -#[test] -#[ignore] -fn connect_basic128rsa15_sign() { +#[tokio::test] +async fn connect_basic128rsa15_sign() { // Connect a session with Basic128Rsa and Sign let port = next_port(); connect_with( port, endpoint_basic128rsa15_sign(port), IdentityToken::Anonymous, - ); + ) + .await; } /// Connect to the server using Basic128Rsa15 + SignEncrypt -#[test] -#[ignore] -fn connect_basic128rsa15_sign_and_encrypt() { +#[tokio::test] +async fn connect_basic128rsa15_sign_and_encrypt() { // Connect a session with Basic128Rsa and SignAndEncrypt let port = next_port(); connect_with( port, endpoint_basic128rsa15_sign_encrypt(port), IdentityToken::Anonymous, - ); + ) + .await; } /// Connect to the server using Basic256 + Sign -#[test] -#[ignore] -fn connect_basic256_sign() { +#[tokio::test] +async fn connect_basic256_sign() { // Connect a session with Basic256 and Sign let port = next_port(); - connect_with(port, endpoint_basic256_sign(port), IdentityToken::Anonymous); + connect_with(port, endpoint_basic256_sign(port), IdentityToken::Anonymous).await; } /// Connect to the server using Basic256 + SignEncrypt -#[test] -#[ignore] -fn connect_basic256_sign_and_encrypt() { +#[tokio::test] +async fn connect_basic256_sign_and_encrypt() { // Connect a session with Basic256 and SignAndEncrypt let port = next_port(); connect_with( port, endpoint_basic256_sign_encrypt(port), IdentityToken::Anonymous, - ); + ) + .await; } /// Connect to the server using Basic256Sha256 + Sign -#[test] -#[ignore] -fn connect_basic256sha256_sign() { +#[tokio::test] +async fn connect_basic256sha256_sign() { // Connect a session with Basic256Sha256 and Sign let port = next_port(); connect_with( port, endpoint_basic256sha256_sign(port), IdentityToken::Anonymous, - ); + ) + .await; } /// Connect 
to the server using Basic256Sha256 + SignEncrypt -#[test] -#[ignore] -fn connect_basic256sha256_sign_and_encrypt() { +#[tokio::test] +async fn connect_basic256sha256_sign_and_encrypt() { let port = next_port(); connect_with( port, endpoint_basic256sha256_sign_encrypt(port), IdentityToken::Anonymous, - ); + ) + .await; } /// Connect to the server using Aes128Sha256RsaOaep + Sign -#[test] -#[ignore] -fn connect_aes128sha256rsaoaep_sign() { +#[tokio::test] +async fn connect_aes128sha256rsaoaep_sign() { let port = next_port(); connect_with( port, endpoint_aes128sha256rsaoaep_sign(port), IdentityToken::Anonymous, - ); + ) + .await; } /// Connect to the server using Aes128Sha256RsaOaep + SignEncrypt -#[test] -#[ignore] -fn connect_aes128sha256rsaoaep_sign_encrypt() { +#[tokio::test] +async fn connect_aes128sha256rsaoaep_sign_encrypt() { let port = next_port(); connect_with( port, endpoint_aes128sha256rsaoaep_sign_encrypt(port), IdentityToken::Anonymous, - ); + ) + .await; } /// Connect to the server using Aes128Sha256RsaOaep + Sign -#[test] -#[ignore] -fn connect_aes256sha256rsapss_sign() { +#[tokio::test] +async fn connect_aes256sha256rsapss_sign() { let port = next_port(); connect_with( port, endpoint_aes256sha256rsapss_sign(port), IdentityToken::Anonymous, - ); + ) + .await; } /// Connect to the server using Aes128Sha256RsaOaep + SignEncrypt -#[test] -#[ignore] -fn connect_aes256sha256rsapss_sign_encrypt() { +#[tokio::test] +async fn connect_aes256sha256rsapss_sign_encrypt() { let port = next_port(); connect_with( port, endpoint_aes256sha256rsapss_sign_encrypt(port), IdentityToken::Anonymous, - ); + ) + .await; } /// Connect to the server user/pass -#[test] -#[ignore] -fn connect_basic128rsa15_with_username_password() { +#[tokio::test] +async fn connect_basic128rsa15_with_username_password() { // Connect a session using username/password token let port = next_port(); connect_with( port, endpoint_basic128rsa15_sign_encrypt(port), client_user_token(), - ); + ) + .await; 
} /// Connect a session using an invalid username/password token and expect it to fail -#[test] -#[ignore] -fn connect_basic128rsa15_with_invalid_username_password() { +#[tokio::test] +async fn connect_basic128rsa15_with_invalid_username_password() { let port = next_port(); connect_with_invalid_token( port, endpoint_basic128rsa15_sign_encrypt(port), client_invalid_user_token(), - ); + ) + .await; } /// Connect a session using an X509 key and certificate -#[test] -#[ignore] -fn connect_basic128rsa15_with_x509_token() { +#[tokio::test] +async fn connect_basic128rsa15_with_x509_token() { let port = next_port(); connect_with( port, endpoint_basic128rsa15_sign_encrypt(port), client_x509_token(), - ); + ) + .await; } /// Connect to a server, read a variable, write a value to the variable, read the variable to verify it changed -#[test] -#[ignore] -fn read_write_read() { +#[tokio::test] +async fn read_write_read() { let port = next_port(); let client_endpoint = endpoint_basic128rsa15_sign_encrypt(port); let identity_token = client_x509_token(); connect_with_client_test( port, - move |_rx_client_command: mpsc::Receiver, mut client: Client| { + move |_rx_client_command: mpsc::UnboundedReceiver, mut client: Client| async move { info!( "Client will try to connect to endpoint {:?}", client_endpoint ); - let session = client - .connect_to_endpoint(client_endpoint, identity_token) + let (session, event_loop) = client + .new_session_from_endpoint(client_endpoint, identity_token) + .await .unwrap(); + let handle = event_loop.spawn(); + session.wait_for_connection().await; + let node_id = stress_node_id(1); // Read the existing value - { - let session = session.read(); - let results = session - .read(&[node_id.clone().into()], TimestampsToReturn::Both, 1.0) - .unwrap(); - let value = &results[0]; - debug!("value = {:?}", value); - assert_eq!(*value.value.as_ref().unwrap(), Variant::Int32(0)) - } + let results = session + .read(&[node_id.clone().into()], TimestampsToReturn::Both, 
1.0) + .await + .unwrap(); + let value = &results[0]; + debug!("value = {:?}", value); + assert_eq!(*value.value.as_ref().unwrap(), Variant::Int32(0)); - { - let session = session.read(); - let results = session - .write(&[WriteValue { - node_id: node_id.clone(), - attribute_id: AttributeId::Value as u32, - index_range: UAString::null(), - value: Variant::Int32(1).into(), - }]) - .unwrap(); - let value = results[0]; - assert_eq!(value, StatusCode::Good); - } + let results = session + .write(&[WriteValue { + node_id: node_id.clone(), + attribute_id: AttributeId::Value as u32, + index_range: UAString::null(), + value: Variant::Int32(1).into(), + }]) + .await + .unwrap(); + let value = results[0]; + assert_eq!(value, StatusCode::Good); - { - let session = session.read(); - let results = session - .read(&[node_id.into()], TimestampsToReturn::Both, 1.0) - .unwrap(); - let value = &results[0]; - assert_eq!(*value.value.as_ref().unwrap(), Variant::Int32(1)) - } + let results = session + .read(&[node_id.into()], TimestampsToReturn::Both, 1.0) + .await + .unwrap(); + let value = &results[0]; + assert_eq!(*value.value.as_ref().unwrap(), Variant::Int32(1)); - { - let session = session.read(); - session.disconnect(); - } + session.disconnect().await.unwrap(); + handle.await.unwrap(); }, - ); + false + ).await; } /// Connect with the server and attempt to subscribe and monitor 1000 variables -#[test] -#[ignore] -fn subscribe_1000() { +#[tokio::test] +async fn subscribe_1000() { let port = next_port(); let client_endpoint = endpoint_basic128rsa15_sign_encrypt(port); let identity_token = client_x509_token(); connect_with_client_test( port, - move |_rx_client_command: mpsc::Receiver, mut client: Client| { + move |_rx_client_command: mpsc::UnboundedReceiver, mut client: Client| async move { info!( "Client will try to connect to endpoint {:?}", client_endpoint ); - let session = client - .connect_to_endpoint(client_endpoint, identity_token) + let (session, event_loop) = client + 
.new_session_from_endpoint(client_endpoint, identity_token) + .await .unwrap(); - let session = session.read(); + + let handle = event_loop.spawn(); + session.wait_for_connection().await; let start_time = Utc::now(); // Create subscription let subscription_id = session .create_subscription( - 2000.0f64, + std::time::Duration::from_secs(2), 100, 100, 0, 0, true, - DataChangeCallback::new(|_| { + DataChangeCallback::new(|_, _| { panic!("This shouldn't be called"); }), ) + .await .unwrap(); // NOTE: There is a default limit of 1000 items in arrays, so this list will go from 1 to 1000 inclusive @@ -525,7 +532,8 @@ fn subscribe_1000() { error!("Elapsed time = {}ms", elapsed.num_milliseconds()); let results = session - .create_monitored_items(subscription_id, TimestampsToReturn::Both, &items_to_create) + .create_monitored_items(subscription_id, TimestampsToReturn::Both, items_to_create) + .await .unwrap(); results.iter().enumerate().for_each(|(i, result)| { if i == 999 { @@ -537,29 +545,33 @@ fn subscribe_1000() { } }); - session.disconnect(); + session.disconnect().await.unwrap(); + handle.await.unwrap(); }, - ); + false + ).await; } -#[test] -#[ignore] -fn method_call() { +#[tokio::test] +async fn method_call() { // Call a method on the server, one exercising some parameters in and out let port = next_port(); let client_endpoint = endpoint_none(port); connect_with_client_test( port, - move |_rx_client_command: mpsc::Receiver, mut client: Client| { + move |_rx_client_command: mpsc::UnboundedReceiver, mut client: Client| async move { info!( "Client will try to connect to endpoint {:?}", client_endpoint ); - let session = client - .connect_to_endpoint(client_endpoint, IdentityToken::Anonymous) + let (session, event_loop) = client + .new_session_from_endpoint(client_endpoint, IdentityToken::Anonymous) + .await .unwrap(); - let session = session.read(); + + let handle = event_loop.spawn(); + session.wait_for_connection().await; // Call the method let input_arguments = 
Some(vec![Variant::from("Foo")]); @@ -568,7 +580,7 @@ fn method_call() { method_id: hellox_method_id(), input_arguments, }; - let result = session.call(method).unwrap(); + let result = session.call(method).await.unwrap(); // Result should say "Hello Foo" assert!(result.status_code.is_good()); @@ -577,7 +589,9 @@ fn method_call() { let msg = output_args.get(0).unwrap(); assert_eq!(msg.to_string(), "Hello Foo!"); - session.disconnect(); + session.disconnect().await.unwrap(); + handle.await.unwrap(); }, - ); + false + ).await; } diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 90c9a470b..f157bca91 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -73,7 +73,11 @@ env_logger = { version = "0.10", optional = true } actix-web = { version = "4.4", optional = true } actix-files = { version = "0.6", optional = true } arbitrary = { version = "1", optional = true, features = ["derive"] } +arc-swap = "1.6.0" [dev-dependencies] tempdir = "0.3" serde_json = "1.0" + +# Include console-logging when building tests +opcua = { path = ".", features = ["console-logging"] } diff --git a/lib/src/client/builder.rs b/lib/src/client/builder.rs index f8a09491c..e802f15c1 100644 --- a/lib/src/client/builder.rs +++ b/lib/src/client/builder.rs @@ -1,56 +1,14 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::path::PathBuf; - -use crate::client::{client::Client, config::*}; -use crate::core::config::Config; - -/// The `ClientBuilder` is a builder for producing a [`Client`]. It is an alternative to constructing -/// a [`ClientConfig`] from file or from scratch. 
-/// -/// # Example -/// -/// ```no_run -/// use opcua::client::prelude::*; -/// -/// fn main() { -/// let builder = ClientBuilder::new() -/// .application_name("OPC UA Sample Client") -/// .application_uri("urn:SampleClient") -/// .pki_dir("./pki") -/// .endpoints(vec![ -/// ("sample_endpoint", ClientEndpoint { -/// url: String::from("opc.tcp://127.0.0.1:4855/"), -/// security_policy: String::from(SecurityPolicy::None.to_str()), -/// security_mode: String::from(MessageSecurityMode::None), -/// user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), -/// }), -/// ]) -/// .default_endpoint("sample_endpoint") -/// .create_sample_keypair(true) -/// .trust_server_certs(true) -/// .user_token("sample_user", ClientUserToken::user_pass("sample1", "sample1pwd")); -/// let client = builder.client().unwrap(); -/// } -/// ``` -/// -/// [`Client`]: ../client/struct.Client.html -/// [`ClientConfig`]: ../config/struct.ClientConfig.html -/// +use std::{path::PathBuf, time::Duration}; + +use crate::server::prelude::Config; + +use super::{Client, ClientConfig, ClientEndpoint, ClientUserToken, ANONYMOUS_USER_TOKEN_ID}; + +#[derive(Default)] pub struct ClientBuilder { config: ClientConfig, } -impl Default for ClientBuilder { - fn default() -> Self { - ClientBuilder { - config: ClientConfig::default(), - } - } -} - impl ClientBuilder { /// Creates a `ClientBuilder` pub fn new() -> ClientBuilder { @@ -58,10 +16,7 @@ impl ClientBuilder { } /// Creates a `ClientBuilder` using a configuration file as the initial state. - pub fn from_config(path: T) -> Result - where - T: Into, - { + pub fn from_config(path: impl Into) -> Result { Ok(ClientBuilder { config: ClientConfig::load(&path.into())?, }) @@ -70,7 +25,7 @@ impl ClientBuilder { /// Yields a [`Client`] from the values set by the builder. If the builder is not in a valid state /// it will return `None`. 
/// - /// [`Client`]: ../client/struct.Client.html + /// [`Client`]: client/struct.Client.html pub fn client(self) -> Option { if self.is_valid() { Some(Client::new(self.config)) @@ -92,28 +47,19 @@ impl ClientBuilder { } /// Sets the application name. - pub fn application_name(mut self, application_name: T) -> Self - where - T: Into, - { + pub fn application_name(mut self, application_name: impl Into) -> Self { self.config.application_name = application_name.into(); self } /// Sets the application uri - pub fn application_uri(mut self, application_uri: T) -> Self - where - T: Into, - { + pub fn application_uri(mut self, application_uri: impl Into) -> Self { self.config.application_uri = application_uri.into(); self } /// Sets the product uri. - pub fn product_uri(mut self, product_uri: T) -> Self - where - T: Into, - { + pub fn product_uri(mut self, product_uri: impl Into) -> Self { self.config.product_uri = product_uri.into(); self } @@ -128,10 +74,7 @@ impl ClientBuilder { /// Sets a custom client certificate path. The path is required to be provided as a partial /// path relative to the PKI directory. If set, this path will be used to read the client /// certificate from disk. The certificate can be in either the .der or .pem format. - pub fn certificate_path(mut self, certificate_path: T) -> Self - where - T: Into, - { + pub fn certificate_path(mut self, certificate_path: impl Into) -> Self { self.config.certificate_path = Some(certificate_path.into()); self } @@ -139,10 +82,7 @@ impl ClientBuilder { /// Sets a custom private key path. The path is required to be provided as a partial path /// relative to the PKI directory. If set, this path will be used to read the private key /// from disk. 
- pub fn private_key_path(mut self, private_key_path: T) -> Self - where - T: Into, - { + pub fn private_key_path(mut self, private_key_path: impl Into) -> Self { self.config.private_key_path = Some(private_key_path.into()); self } @@ -167,10 +107,7 @@ impl ClientBuilder { /// Sets the pki directory where client's own key pair is stored and where `/trusted` and /// `/rejected` server certificates are stored. - pub fn pki_dir(mut self, pki_dir: T) -> Self - where - T: Into, - { + pub fn pki_dir(mut self, pki_dir: impl Into) -> Self { self.config.pki_dir = pki_dir.into(); self } @@ -183,28 +120,19 @@ impl ClientBuilder { } /// Sets the id of the default endpoint to connect to. - pub fn default_endpoint(mut self, endpoint_id: T) -> Self - where - T: Into, - { + pub fn default_endpoint(mut self, endpoint_id: impl Into) -> Self { self.config.default_endpoint = endpoint_id.into(); self } /// Adds an endpoint to the list of endpoints the client knows of. - pub fn endpoint(mut self, endpoint_id: T, endpoint: ClientEndpoint) -> Self - where - T: Into, - { + pub fn endpoint(mut self, endpoint_id: impl Into, endpoint: ClientEndpoint) -> Self { self.config.endpoints.insert(endpoint_id.into(), endpoint); self } /// Adds multiple endpoints to the list of endpoints the client knows of. - pub fn endpoints(mut self, endpoints: Vec<(T, ClientEndpoint)>) -> Self - where - T: Into, - { + pub fn endpoints(mut self, endpoints: Vec<(impl Into, ClientEndpoint)>) -> Self { for e in endpoints { self.config.endpoints.insert(e.0.into(), e.1); } @@ -212,10 +140,11 @@ impl ClientBuilder { } /// Adds a user token to the list supported by the client. 
- pub fn user_token(mut self, user_token_id: T, user_token: ClientUserToken) -> Self - where - T: Into, - { + pub fn user_token( + mut self, + user_token_id: impl Into, + user_token: ClientUserToken, + ) -> Self { let user_token_id = user_token_id.into(); if user_token_id == ANONYMOUS_USER_TOKEN_ID { panic!("User token id {} is reserved", user_token_id); @@ -224,7 +153,53 @@ impl ClientBuilder { self } + /// Sets the maximum outgoing message size in bytes. 0 means no limit. + pub fn max_message_size(mut self, max_message_size: usize) -> Self { + self.config.decoding_options.max_message_size = max_message_size; + self + } + + /// Sets the maximum number of chunks in an outgoing message. 0 means no limit. + pub fn max_chunk_count(mut self, max_chunk_count: usize) -> Self { + self.config.decoding_options.max_chunk_count = max_chunk_count; + self + } + + /// Maximum size of each individual outgoing message chunk. + pub fn max_chunk_size(mut self, max_chunk_size: usize) -> Self { + self.config.decoding_options.max_chunk_size = max_chunk_size; + self + } + + /// Maximum size of each incoming chunk. + pub fn max_incoming_chunk_size(mut self, max_incoming_chunk_size: usize) -> Self { + self.config.decoding_options.max_incoming_chunk_size = max_incoming_chunk_size; + self + } + + /// Maximum length in bytes of a string. 0 actually means 0, i.e. no string permitted. + pub fn max_string_length(mut self, max_string_length: usize) -> Self { + self.config.decoding_options.max_string_length = max_string_length; + self + } + + /// Maximum length in bytes of a byte string. 0 actually means 0, i.e. no byte strings permitted. + pub fn max_byte_string_length(mut self, max_byte_string_length: usize) -> Self { + self.config.decoding_options.max_byte_string_length = max_byte_string_length; + self + } + + /// Maximum number of array elements. 0 actually means 0, i.e. 
no array permitted + pub fn max_array_length(mut self, max_array_length: usize) -> Self { + self.config.decoding_options.max_array_length = max_array_length; + self + } + /// Sets the session retry limit. + /// + /// # Panics + /// + /// Panics if `session_retry_limit` is less -1. pub fn session_retry_limit(mut self, session_retry_limit: i32) -> Self { if session_retry_limit < 0 && session_retry_limit != -1 { panic!("Session retry limit must be -1, 0 or a positive number"); @@ -233,13 +208,51 @@ impl ClientBuilder { self } - /// Sets the session retry interval. - pub fn session_retry_interval(mut self, session_retry_interval: u32) -> Self { - self.config.session_retry_interval = session_retry_interval; + /// Initial time between retries when backing off on session reconnects. + pub fn session_retry_initial(mut self, session_retry_initial: Duration) -> Self { + self.config.session_retry_initial = session_retry_initial; + self + } + + /// Maximum time between retries when backing off on session reconnects. + pub fn session_retry_max(mut self, session_retry_max: Duration) -> Self { + self.config.session_retry_max = session_retry_max; + self + } + + /// Time between making simple Read requests to the server to check for liveness + /// and avoid session timeouts. + pub fn keep_alive_interval(mut self, keep_alive_interval: Duration) -> Self { + self.config.keep_alive_interval = keep_alive_interval; + self + } + + /// Set the timeout on requests sent to the server. + pub fn request_timeout(mut self, request_timeout: Duration) -> Self { + self.config.request_timeout = request_timeout; + self + } + + /// Set the timeout on publish requests sent to the server. + pub fn publish_timeout(mut self, publish_timeout: Duration) -> Self { + self.config.publish_timeout = publish_timeout; + self + } + + /// Set the lowest allowed publishing interval by the client. + /// The server may also enforce its own minimum. 
+ pub fn min_publish_interval(mut self, min_publish_interval: Duration) -> Self { + self.config.min_publish_interval = min_publish_interval; + self + } + + /// Maximum number of pending publish requests. + pub fn max_inflight_publish(mut self, max_inflight_publish: usize) -> Self { + self.config.max_inflight_publish = max_inflight_publish; self } - /// Sets the session timeout period. + /// Sets the session timeout period, in milliseconds. pub fn session_timeout(mut self, session_timeout: u32) -> Self { self.config.session_timeout = session_timeout; self @@ -252,87 +265,23 @@ impl ClientBuilder { self } - /// Configures the client to use a single-threaded executor. This reduces the number of - /// threads used by the client. - pub fn single_threaded_executor(mut self) -> Self { - self.config.performance.single_threaded_executor = true; + /// When a session is recreated on the server, the client will attempt to + /// transfer monitored subscriptions from the old session to the new. + /// This is the maximum number of monitored items to create per request. + pub fn recreate_monitored_items_chunk(mut self, recreate_monitored_items_chunk: usize) -> Self { + self.config.performance.recreate_monitored_items_chunk = recreate_monitored_items_chunk; self } - /// Configures the client to use a multi-threaded executor. - pub fn multi_threaded_executor(mut self) -> Self { - self.config.performance.single_threaded_executor = false; + /// Maximum number of inflight messages. 
+ pub fn max_inflight_messages(mut self, max_inflight_messages: usize) -> Self { + self.config.performance.max_inflight_messages = max_inflight_messages; self } /// Session name - the default name to use for a new session - pub fn session_name(mut self, session_name: T) -> Self - where - T: Into, - { + pub fn session_name(mut self, session_name: impl Into) -> Self { self.config.session_name = session_name.into(); self } - - /// Set the maximum message size - pub fn max_message_size(mut self, max_message_size: usize) -> Self { - self.config.decoding_options.max_message_size = max_message_size; - self - } - - /// Set the max chunk count - pub fn max_chunk_count(mut self, max_chunk_count: usize) -> Self { - self.config.decoding_options.max_chunk_count = max_chunk_count; - self - } -} - -#[test] -fn client_builder() { - use std::str::FromStr; - - // The builder should produce a config that reflects the values that are explicitly set upon it. - let b = ClientBuilder::new() - .application_name("appname") - .application_uri("http://appname") - .product_uri("http://product") - .create_sample_keypair(true) - .certificate_path("certxyz") - .private_key_path("keyxyz") - .trust_server_certs(true) - .verify_server_certs(false) - .pki_dir("pkixyz") - .preferred_locales(vec!["a".to_string(), "b".to_string(), "c".to_string()]) - .default_endpoint("http://default") - .session_retry_interval(1234) - .session_retry_limit(999) - .session_timeout(777) - .ignore_clock_skew() - .single_threaded_executor() - .session_name("SessionName") - // TODO user tokens, endpoints - ; - - let c = b.config(); - - assert_eq!(c.application_name, "appname"); - assert_eq!(c.application_uri, "http://appname"); - assert_eq!(c.product_uri, "http://product"); - assert_eq!(c.create_sample_keypair, true); - assert_eq!(c.certificate_path, Some(PathBuf::from("certxyz"))); - assert_eq!(c.private_key_path, Some(PathBuf::from("keyxyz"))); - assert_eq!(c.trust_server_certs, true); - assert_eq!(c.verify_server_certs, 
false); - assert_eq!(c.pki_dir, PathBuf::from_str("pkixyz").unwrap()); - assert_eq!( - c.preferred_locales, - vec!["a".to_string(), "b".to_string(), "c".to_string()] - ); - assert_eq!(c.default_endpoint, "http://default"); - assert_eq!(c.session_retry_interval, 1234); - assert_eq!(c.session_retry_limit, 999); - assert_eq!(c.session_timeout, 777); - assert_eq!(c.performance.ignore_clock_skew, true); - assert_eq!(c.performance.single_threaded_executor, true); - assert_eq!(c.session_name, "SessionName"); } diff --git a/lib/src/client/callbacks.rs b/lib/src/client/callbacks.rs deleted file mode 100644 index 68a1ce3d9..000000000 --- a/lib/src/client/callbacks.rs +++ /dev/null @@ -1,161 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Provides callback traits and concrete implementations that the client can use to register for notifications -//! with the client api. -//! -//! For example, the client must supply an [`OnSubscriptionNotification`] implementation when it calls `Session::create_subscription`. -//! It could implement this trait for itself, or it can use the concrete implementations in [`DataChangeCallback`] and [`EventCallback`]. -//! -//! [`DataChangeCallback`]: ./struct.DataChangeCallback.html -//! [`EventCallback`]: ./struct.EventCallback.html - -use std::fmt; - -use crate::types::{service_types::EventNotificationList, status_code::StatusCode}; - -use super::subscription::MonitoredItem; - -/// The `OnSubscriptionNotification` trait is the callback registered along with a new subscription to -/// receive subscription notification callbacks. -/// -/// Unless your subscription contains a mix of items which are monitoring data and events -/// you probably only need to implement either `data_change()`, or `event()` and leave the default, -/// no-op implementation for the other. -/// -/// There are concrete implementations of this trait in [`DataChangeCallback`] and [`EventCallback`]. 
-/// -/// [`DataChangeCallback`]: ./struct.DataChangeCallback.html -/// [`EventCallback`]: ./struct.EventCallback.html -/// -pub trait OnSubscriptionNotification { - /// Called by the subscription after a `DataChangeNotification`. The default implementation - /// does nothing. - fn on_data_change(&mut self, _data_change_items: &[&MonitoredItem]) {} - - /// Called by the subscription after a `EventNotificationList`. The notifications contained within - /// are individual `EventFieldList` structs filled from the select clause criteria from when the - /// event was constructed. The default implementation does nothing. - fn on_event(&mut self, _events: &EventNotificationList) {} -} - -/// The `OnConnectionStatusChange` trait can be used to register on the session to be notified -/// of connection status change notifications. -pub trait OnConnectionStatusChange { - /// Called when the connection status changes from connected to disconnected or vice versa - fn on_connection_status_change(&mut self, connected: bool); -} - -/// The `OnSessionClosed` trait can be used to register on a session and called to notify the client -/// that the session has closed. -pub trait OnSessionClosed { - /// Called when the connection closed (in addition to a status change event). The status - /// code should be checked to see if the closure was a graceful terminate (`Good`), or the result - /// of a network or protocol error. - /// - /// If no session retry policy has been created for the client session, the server implementation - /// might choose to reconnect in response to a bad status code by itself, however it should - /// avoid retrying too quickly or indefinitely in case the error is permanent. - fn on_session_closed(&mut self, status_code: StatusCode); -} - -/// This is a concrete implementation of [`OnSubscriptionNotification`] that calls a function when -/// a data change occurs. 
-pub struct DataChangeCallback { - /// The actual call back - cb: Box, -} - -impl OnSubscriptionNotification for DataChangeCallback { - fn on_data_change(&mut self, data_change_items: &[&MonitoredItem]) { - (self.cb)(data_change_items); - } -} - -impl DataChangeCallback { - /// Constructs a callback from the supplied function - pub fn new(cb: CB) -> Self - where - CB: Fn(&[&MonitoredItem]) + Send + Sync + 'static, - { - Self { cb: Box::new(cb) } - } -} - -/// This is a concrete implementation of [`OnSubscriptionNotification`] that calls a function -/// when an event occurs. -pub struct EventCallback { - /// The actual call back - cb: Box, -} - -impl OnSubscriptionNotification for EventCallback { - fn on_event(&mut self, events: &EventNotificationList) { - (self.cb)(events); - } -} - -impl EventCallback { - /// Constructs a callback from the supplied function - pub fn new(cb: CB) -> Self - where - CB: Fn(&EventNotificationList) + Send + Sync + 'static, - { - Self { cb: Box::new(cb) } - } -} - -/// This is a concrete implementation of [`OnConnectionStatusChange`] that calls the supplied function. -pub struct ConnectionStatusCallback { - cb: Box, -} - -impl fmt::Debug for ConnectionStatusCallback { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "[callback]") - } -} - -impl OnConnectionStatusChange for ConnectionStatusCallback { - fn on_connection_status_change(&mut self, connected: bool) { - if connected { - debug!("Received OPC UA connected event"); - } else { - debug!("Received OPC UA disconnected event"); - } - (self.cb)(connected); - } -} - -impl ConnectionStatusCallback { - // Constructor - pub fn new(cb: CB) -> Self - where - CB: FnMut(bool) + Send + Sync + 'static, - { - Self { cb: Box::new(cb) } - } -} - -/// This is a concrete implementation of `OnSessionClosed` that will call the supplied -/// function. 
-pub struct SessionClosedCallback { - cb: Box, -} - -impl OnSessionClosed for SessionClosedCallback { - fn on_session_closed(&mut self, status_code: StatusCode) { - (self.cb)(status_code); - } -} - -impl SessionClosedCallback { - // Constructor - pub fn new(cb: CB) -> Self - where - CB: FnMut(StatusCode) + Send + Sync + 'static, - { - Self { cb: Box::new(cb) } - } -} diff --git a/lib/src/client/client.rs b/lib/src/client/client.rs deleted file mode 100644 index 0c4b0ec3a..000000000 --- a/lib/src/client/client.rs +++ /dev/null @@ -1,755 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Client setup and session creation. - -use std::{path::PathBuf, str::FromStr, sync::Arc}; - -use chrono::Duration; - -use super::{ - config::{ClientConfig, ClientEndpoint, ANONYMOUS_USER_TOKEN_ID}, - session::{ - services::*, - session::{Session, SessionInfo}, - }, - session_retry_policy::SessionRetryPolicy, -}; - -use crate::{ - core::{ - comms::url::{ - hostname_from_url, is_opc_ua_binary_url, is_valid_opc_ua_url, - server_url_from_endpoint_url, url_matches_except_host, url_with_replaced_hostname, - }, - config::Config, - }, - crypto::{CertificateStore, SecurityPolicy}, - sync::RwLock, - trace_read_lock, - types::{ - service_types::{ApplicationDescription, EndpointDescription, RegisteredServer}, - status_code::StatusCode, - DecodingOptions, MessageSecurityMode, - }, -}; - -#[derive(Debug, Clone)] -pub enum IdentityToken { - /// Anonymous identity token - Anonymous, - /// User name and a password - UserName(String, String), - /// X5090 cert - a path to the cert.der, and private.pem - X509(PathBuf, PathBuf), -} - -/// The `Client` defines a connection that can be used to to get end points or establish -/// one or more sessions with an OPC UA server. It is constructed via a [`ClientBuilder`] or -/// from a described configuration [`ClientConfig`] that could be deserialized from file. 
-/// -/// You have a couple of choices when creating a client that connects to a server depending on whether -/// you know the endpoints up front. -/// -/// 1. Define all the endpoints you expect to connect with via your builder / config and then -/// use `connect_to_endpoint_id()` to connect to one of them by its id. This option assumes that your -/// client and the server it connects to are describing the same endpoints. It will not work if the server describes different endpoints -/// than the one in your config. -/// -/// 2. Define no endpoints and then call `connect_to_endpoint()` with an ad hoc endpoint description. -/// This is the suitable choice if your client can connect to a multitude of servers without -/// advance description of their endpoints. -/// -/// [`ClientConfig`]: ../config/struct.ClientConfig.html -/// [`ClientBuilder`]: ../builder/struct.ClientBuilder.html -/// -pub struct Client { - /// Client configuration - config: ClientConfig, - /// Certificate store is where certificates go. - certificate_store: Arc>, - /// The session retry policy for new sessions - session_retry_policy: SessionRetryPolicy, -} - -impl Drop for Client { - fn drop(&mut self) { - // TODO - this causes panics on unwrap - have to figure the reason out - // for session in self.sessions.iter_mut() { - // // Disconnect - // let mut session = trace_write_lock!(session.session); - // if session.is_connected() { - // session.disconnect() - // } - // } - } -} - -impl From for Client { - fn from(config: ClientConfig) -> Client { - Client::new(config) - } -} - -impl Client { - /// Creates a new [`Client`] instance from a [`ClientConfig`]. The configuration - /// defines the behaviour of the new client, which endpoints it recognizes, where it stores - /// certificates etc. - /// - /// A [`Client`] can be made directly or by using a [`ClientBuilder`]. 
- /// - /// # Example - /// - /// ```no_run - /// use std::path::PathBuf; - /// use opcua::client::prelude::*; - /// - /// fn main() { - /// let mut client = Client::new(ClientConfig::load(&PathBuf::from("./myclient.conf")).unwrap()); - /// if let Ok(session) = client.connect_to_endpoint_id(None) { - /// // .. - /// } - /// } - /// ``` - /// - /// [`Client`]: ./struct.Client.html - /// [`ClientConfig`]: ../config/struct.ClientConfig.html - /// [`ClientBuilder`]: ../config/struct.ClientBuilder.html - /// - pub fn new(config: ClientConfig) -> Client { - let application_description = if config.create_sample_keypair { - Some(config.application_description()) - } else { - None - }; - - let (mut certificate_store, client_certificate, client_pkey) = - CertificateStore::new_with_x509_data( - &config.pki_dir, - false, - config.certificate_path.as_deref(), - config.private_key_path.as_deref(), - application_description, - ); - if client_certificate.is_none() || client_pkey.is_none() { - error!("Client is missing its application instance certificate and/or its private key. 
Encrypted endpoints will not function correctly.") - } - - // Clients may choose to skip additional server certificate validations - certificate_store.set_skip_verify_certs(!config.verify_server_certs); - - // Clients may choose to auto trust servers to save some messing around with rejected certs - certificate_store.set_trust_unknown_certs(config.trust_server_certs); - - let session_timeout = config.session_timeout as f64; - - // The session retry policy dictates how many times to retry if connection to the server goes down - // and on what interval - let session_retry_policy = match config.session_retry_limit { - // Try forever - -1 => SessionRetryPolicy::infinity(session_timeout, config.session_retry_interval), - // Never try - 0 => SessionRetryPolicy::never(session_timeout), - // Try this many times - session_retry_limit => SessionRetryPolicy::new( - session_timeout, - session_retry_limit as u32, - config.session_retry_interval, - ), - }; - - Client { - config, - session_retry_policy, - certificate_store: Arc::new(RwLock::new(certificate_store)), - } - } - - /// Returns a filled OPC UA [`ApplicationDescription`] using information from the config - /// - /// [`ApplicationDescription`]: ../../opcua_types/service_types/application_description/struct.ApplicationDescription.html - /// - pub fn application_description(&self) -> ApplicationDescription { - self.config.application_description() - } - - /// Connects to a named endpoint that you have defined in the `ClientConfig` - /// and creates / activates a [`Session`] for that endpoint. Note that `GetEndpoints` is first - /// called on the server and it is expected to support the endpoint you intend to connect to. - /// - /// Returns with the session that has been established or an error. - /// - /// Important Note: The `Session` you receive from this call is protected because it is - /// accessed by multiple internal threads. 
You must scope lock calls to this session object and not - /// hold the lock for more than required. - /// - /// [`Session`]: ../session/struct.Session.html - /// - pub fn connect_to_endpoint_id( - &mut self, - endpoint_id: Option<&str>, - ) -> Result>, StatusCode> { - // Ask the server associated with the default endpoint for its list of endpoints - let endpoints = match self.get_server_endpoints() { - Result::Err(status_code) => { - error!("Cannot get endpoints for server, error - {}", status_code); - return Err(status_code); - } - Result::Ok(endpoints) => endpoints, - }; - - info!("Server has these endpoints:"); - endpoints.iter().for_each(|e| { - info!( - " {} - {:?} / {:?}", - e.endpoint_url, - SecurityPolicy::from_str(e.security_policy_uri.as_ref()).unwrap(), - e.security_mode - ) - }); - - // Create a session to an endpoint. If an endpoint id is specified use that - let session = if let Some(endpoint_id) = endpoint_id { - self.new_session_from_id(endpoint_id, &endpoints).unwrap() - } else { - self.new_session(&endpoints).unwrap() - }; - - { - // Connect to the server - let mut session = session.write(); - session.connect_and_activate().map_err(|err| { - error!("Got an error while creating the default session - {}", err); - err - })?; - } - - Ok(session) - } - - /// Connects to an ad-hoc server endpoint description. and creates / activates a [`Session`] for - /// that endpoint. - /// - /// Returns with the session that has been established or an error. - /// - /// Important Note: The `Session` you receive from this call is protected because it is - /// accessed by multiple internal threads. You must scope lock calls to this session object and not - /// hold the lock for more than required. 
- /// - /// [`Session`]: ../session/struct.Session.html - /// - pub fn connect_to_endpoint( - &mut self, - endpoint: T, - user_identity_token: IdentityToken, - ) -> Result>, StatusCode> - where - T: Into, - { - let endpoint = endpoint.into(); - - // Get the server endpoints - let server_url = endpoint.endpoint_url.as_ref(); - - let server_endpoints = - self.get_server_endpoints_from_url(server_url) - .map_err(|status_code| { - error!("Cannot get endpoints for server, error - {}", status_code); - status_code - })?; - - // Find the server endpoint that matches the one desired - let security_policy = SecurityPolicy::from_str(endpoint.security_policy_uri.as_ref()) - .map_err(|_| StatusCode::BadSecurityPolicyRejected)?; - let server_endpoint = Client::find_matching_endpoint( - &server_endpoints, - endpoint.endpoint_url.as_ref(), - security_policy, - endpoint.security_mode, - ) - .ok_or(StatusCode::BadTcpEndpointUrlInvalid) - .map_err(|status_code| { - error!( - "Cannot find matching endpoint for {}", - endpoint.endpoint_url.as_ref() - ); - status_code - })?; - - // Create a session - let session = self - .new_session_from_info((server_endpoint, user_identity_token)) - .unwrap(); - - { - // Connect to the server - let mut session = session.write(); - session.connect_and_activate().map_err(|err| { - error!("Got an error while creating the default session - {}", err); - err - })?; - } - - Ok(session) - } - - /// Gets the [`ClientEndpoint`] information for the default endpoint, as defined - /// by the configuration. If there is no default endpoint, this function will return an error. 
- /// - /// [`ClientEndpoint`]: ../config/struct.ClientEndpoint.html - /// - pub fn default_endpoint(&self) -> Result { - let default_endpoint_id = self.config.default_endpoint.clone(); - if default_endpoint_id.is_empty() { - Err("No default endpoint has been specified".to_string()) - } else if let Some(endpoint) = self.config.endpoints.get(&default_endpoint_id) { - Ok(endpoint.clone()) - } else { - Err(format!( - "Cannot find default endpoint with id {}", - default_endpoint_id - )) - } - } - - /// Creates a new [`Session`] using the default endpoint specified in the config. If - /// there is no default, or the endpoint does not exist, this function will return an error - /// - /// [`Session`]: ../session/struct.Session.html - /// - pub fn new_session( - &mut self, - endpoints: &[EndpointDescription], - ) -> Result>, String> { - let endpoint = self.default_endpoint()?; - self.new_session_from_endpoint(&endpoint, endpoints) - } - - /// Creates a new [`Session`] using the named endpoint id. If there is no - /// endpoint of that id in the config, this function will return an error - /// - /// [`Session`]: ../session/struct.Session.html - /// - pub fn new_session_from_id( - &mut self, - endpoint_id: T, - endpoints: &[EndpointDescription], - ) -> Result>, String> - where - T: Into, - { - let endpoint_id = endpoint_id.into(); - let endpoint = { - let endpoint = self.config.endpoints.get(&endpoint_id); - if endpoint.is_none() { - return Err(format!("Cannot find endpoint with id {}", endpoint_id)); - } - // This clone is an unfortunate workaround to a lifetime issue between the borrowed - // endpoint and the need to call the mutable new_session_from_endpoint() - endpoint.unwrap().clone() - }; - self.new_session_from_endpoint(&endpoint, endpoints) - } - - /// Creates a new [`Session`] using provided client endpoint and endpoint descriptions. - /// If the endpoint does not exist or is in error, this function will return an error. 
- /// - /// [`Session`]: ../session/struct.Session.html - /// - fn new_session_from_endpoint( - &mut self, - client_endpoint: &ClientEndpoint, - endpoints: &[EndpointDescription], - ) -> Result>, String> { - let session_info = self.session_info_for_endpoint(client_endpoint, endpoints)?; - self.new_session_from_info(session_info) - } - - /// Creates an ad hoc new [`Session`] using the specified endpoint url, security policy and mode. - /// - /// This function supports anything that implements `Into`, for example `EndpointDescription`. - /// - /// [`Session`]: ../session/struct.Session.html - /// - pub fn new_session_from_info( - &mut self, - session_info: T, - ) -> Result>, String> - where - T: Into, - { - let session_info = session_info.into(); - if !is_opc_ua_binary_url(session_info.endpoint.endpoint_url.as_ref()) { - Err(format!( - "Endpoint url {}, is not a valid / supported url", - session_info.endpoint.endpoint_url - )) - } else { - let session = Arc::new(RwLock::new(Session::new( - self.application_description(), - self.config.session_name.clone(), - self.certificate_store.clone(), - session_info, - self.session_retry_policy.clone(), - self.decoding_options(), - self.config.performance.ignore_clock_skew, - self.config.performance.single_threaded_executor, - ))); - Ok(session) - } - } - - /// Connects to the client's default configured endpoint asks the server for a list of - /// [`EndpointDescription`] that it hosts. If there is an error, the function will - /// return an error. 
- /// - /// [`EndpointDescription`]: ../../opcua_types/service_types/endpoint_description/struct.EndpointDescription.html - /// - pub fn get_server_endpoints(&self) -> Result, StatusCode> { - if let Ok(default_endpoint) = self.default_endpoint() { - if let Ok(server_url) = server_url_from_endpoint_url(&default_endpoint.url) { - self.get_server_endpoints_from_url(server_url) - } else { - error!( - "Cannot create a server url from the specified endpoint url {}", - default_endpoint.url - ); - Err(StatusCode::BadUnexpectedError) - } - } else { - error!("There is no default endpoint, so cannot get endpoints"); - Err(StatusCode::BadUnexpectedError) - } - } - - fn decoding_options(&self) -> DecodingOptions { - let decoding_options = &self.config.decoding_options; - DecodingOptions { - max_chunk_count: decoding_options.max_chunk_count, - max_message_size: decoding_options.max_message_size, - max_string_length: decoding_options.max_string_length, - max_byte_string_length: decoding_options.max_byte_string_length, - max_array_length: decoding_options.max_array_length, - client_offset: Duration::zero(), - ..Default::default() - } - } - - /// Connects to the specified server_url with a None/None connection and asks for a list of - /// [`EndpointDescription`] that it hosts. - /// - /// # Example - /// - /// ```no_run - /// use opcua::client::prelude::*; - /// use std::path::PathBuf; - /// - /// fn main() { - /// let mut client = Client::new(ClientConfig::load(&PathBuf::from("./myclient.conf")).unwrap()); - /// if let Ok(endpoints) = client.get_server_endpoints_from_url("opc.tcp://foo:1234") { - /// if let Some(endpoint) = Client::find_matching_endpoint(&endpoints, "opc.tcp://foo:1234/mypath", SecurityPolicy::None, MessageSecurityMode::None) { - /// //... 
- /// } - /// } - /// } - /// ``` - /// - /// [`EndpointDescription`]: ../../opcua_types/service_types/endpoint_description/struct.EndpointDescription.html - /// - pub fn get_server_endpoints_from_url( - &self, - server_url: T, - ) -> Result, StatusCode> - where - T: Into, - { - let server_url = server_url.into(); - if !is_opc_ua_binary_url(&server_url) { - Err(StatusCode::BadTcpEndpointUrlInvalid) - } else { - let preferred_locales = Vec::new(); - // Most of these fields mean nothing when getting endpoints - let endpoint = EndpointDescription::from(server_url.as_ref()); - let session_info = SessionInfo { - endpoint, - user_identity_token: IdentityToken::Anonymous, - preferred_locales, - }; - let session = Session::new( - self.application_description(), - self.config.session_name.clone(), - self.certificate_store.clone(), - session_info, - self.session_retry_policy.clone(), - self.decoding_options(), - self.config.performance.ignore_clock_skew, - self.config.performance.single_threaded_executor, - ); - session.connect()?; - let result = session.get_endpoints()?; - session.disconnect(); - Ok(result) - } - } - - /// Connects to a discovery server and asks the server for a list of - /// available server [`ApplicationDescription`]. - /// - /// [`ApplicationDescription`]: ../../opcua_types/service_types/application_description/struct.ApplicationDescription.html - /// - pub fn find_servers( - &mut self, - discovery_endpoint_url: T, - ) -> Result, StatusCode> - where - T: Into, - { - let discovery_endpoint_url = discovery_endpoint_url.into(); - debug!("find_servers, {}", discovery_endpoint_url); - let endpoint = EndpointDescription::from(discovery_endpoint_url.as_ref()); - let session = self.new_session_from_info(endpoint); - if let Ok(session) = session { - let session = trace_read_lock!(session); - // Connect & activate the session. 
- let connected = session.connect(); - if connected.is_ok() { - // Find me some some servers - let result = session - .find_servers(discovery_endpoint_url.clone()) - .map_err(|err| { - error!( - "Cannot find servers on discovery server {} - check this error - {:?}", - discovery_endpoint_url, err - ); - err - }); - session.disconnect(); - result - } else { - let result = connected.unwrap_err(); - error!( - "Cannot connect to {} - check this error - {}", - discovery_endpoint_url, result - ); - Err(result) - } - } else { - let result = StatusCode::BadUnexpectedError; - error!( - "Cannot create a sesion to {} - check if url is malformed", - discovery_endpoint_url - ); - Err(result) - } - } - - /// Called by servers that wish to register themselves with a discovery server. - /// - /// In this role, the server becomes the client of the discovery server, so it needs to connect - /// as a client, query the endpoints, establish a session, register its own endpoints and then - /// disconnect. - /// - /// The implementation of this function looks for the strongest endpoint of the discovery server - /// to register itself on. That makes it possible that the discovery server may reject the - /// connection if it does not trust the client. In that instance, it is up to the user to do - /// whatever is required to make the discovery server trust the registering server. - /// - /// For example the standard OPC foundation discovery server will drop the server's cert in a - /// `rejected/` folder on the filesystem and this cert has to be moved to a `trusted/certs/` folder. 
- pub fn register_server( - &mut self, - discovery_endpoint_url: T, - server: RegisteredServer, - ) -> Result<(), StatusCode> - where - T: Into, - { - let discovery_endpoint_url = discovery_endpoint_url.into(); - if !is_valid_opc_ua_url(&discovery_endpoint_url) { - error!( - "Discovery endpoint url \"{}\" is not a valid OPC UA url", - discovery_endpoint_url - ); - Err(StatusCode::BadTcpEndpointUrlInvalid) - } else { - // Get a list of endpoints from the discovery server - debug!("register_server({}, {:?}", discovery_endpoint_url, server); - let endpoints = self.get_server_endpoints_from_url(discovery_endpoint_url.clone())?; - if endpoints.is_empty() { - Err(StatusCode::BadUnexpectedError) - } else { - // Now choose the strongest endpoint to register through - if let Some(endpoint) = endpoints - .iter() - .filter(|e| self.is_supported_endpoint(*e)) - .max_by(|a, b| a.security_level.cmp(&b.security_level)) - { - debug!( - "Registering this server via discovery endpoint {:?}", - endpoint - ); - let session = self.new_session_from_info(endpoint.clone()); - if let Ok(session) = session { - let session = trace_read_lock!(session); - match session.connect() { - Ok(_) => { - // Register with the server - let result = session.register_server(server); - session.disconnect(); - result - } - Err(result) => { - error!( - "Cannot connect to {} - check this error - {}", - discovery_endpoint_url, result - ); - Err(result) - } - } - } else { - error!( - "Cannot create a sesion to {} - check if url is malformed", - discovery_endpoint_url - ); - Err(StatusCode::BadUnexpectedError) - } - } else { - error!("Cannot find an endpoint that we call register server on"); - Err(StatusCode::BadUnexpectedError) - } - } - } - } - - /// Determine if we recognize the security of this endpoint - fn is_supported_endpoint(&self, endpoint: &EndpointDescription) -> bool { - if let Ok(security_policy) = SecurityPolicy::from_str(endpoint.security_policy_uri.as_ref()) - { - !matches!(security_policy, 
SecurityPolicy::Unknown) - } else { - false - } - } - - /// Returns an identity token corresponding to the matching user in the configuration. Or None - /// if there is no matching token. - fn client_identity_token(&self, user_token_id: T) -> Option - where - T: Into, - { - let user_token_id = user_token_id.into(); - if user_token_id == ANONYMOUS_USER_TOKEN_ID { - Some(IdentityToken::Anonymous) - } else if let Some(token) = self.config.user_tokens.get(&user_token_id) { - if let Some(ref password) = token.password { - Some(IdentityToken::UserName( - token.user.clone(), - password.clone(), - )) - } else if let Some(ref cert_path) = token.cert_path { - token.private_key_path.as_ref().map(|private_key_path| { - IdentityToken::X509(PathBuf::from(cert_path), PathBuf::from(private_key_path)) - }) - } else { - None - } - } else { - None - } - } - - /// Find an endpoint supplied from the list of endpoints that matches the input criteria - pub fn find_matching_endpoint( - endpoints: &[EndpointDescription], - endpoint_url: &str, - security_policy: SecurityPolicy, - security_mode: MessageSecurityMode, - ) -> Option { - if security_policy == SecurityPolicy::Unknown { - panic!("Cannot match against unknown security policy"); - } - - let matching_endpoint = endpoints - .iter() - .find(|e| { - // Endpoint matches if the security mode, policy and url match - security_mode == e.security_mode - && security_policy == SecurityPolicy::from_uri(e.security_policy_uri.as_ref()) - && url_matches_except_host(endpoint_url, e.endpoint_url.as_ref()) - }) - .cloned(); - - // Issue #16, #17 - the server may advertise an endpoint whose hostname is inaccessible - // to the client so substitute the advertised hostname with the one the client supplied. 
- if let Some(mut matching_endpoint) = matching_endpoint { - if let Ok(hostname) = hostname_from_url(endpoint_url) { - if let Ok(new_endpoint_url) = - url_with_replaced_hostname(matching_endpoint.endpoint_url.as_ref(), &hostname) - { - matching_endpoint.endpoint_url = new_endpoint_url.into(); - Some(matching_endpoint) - } else { - None - } - } else { - None - } - } else { - None - } - } - - /// Creates a [`SessionInfo`](SessionInfo) information from the supplied client endpoint. - fn session_info_for_endpoint( - &self, - client_endpoint: &ClientEndpoint, - endpoints: &[EndpointDescription], - ) -> Result { - // Enumerate endpoints looking for matching one - if let Ok(security_policy) = SecurityPolicy::from_str(&client_endpoint.security_policy) { - let security_mode = MessageSecurityMode::from(client_endpoint.security_mode.as_ref()); - if security_mode != MessageSecurityMode::Invalid { - let endpoint_url = client_endpoint.url.clone(); - // Now find a matching endpoint from those on the server - let endpoint = Self::find_matching_endpoint( - endpoints, - &endpoint_url, - security_policy, - security_mode, - ); - if endpoint.is_none() { - Err(format!("Endpoint {}, {:?} / {:?} does not match against any supplied by the server", endpoint_url, security_policy, security_mode)) - } else if let Some(user_identity_token) = - self.client_identity_token(client_endpoint.user_token_id.clone()) - { - info!( - "Creating a session for endpoint {}, {:?} / {:?}", - endpoint_url, security_policy, security_mode - ); - let preferred_locales = self.config.preferred_locales.clone(); - Ok(SessionInfo { - endpoint: endpoint.unwrap(), - user_identity_token, - preferred_locales, - }) - } else { - Err(format!( - "Endpoint {} user id cannot be found", - client_endpoint.user_token_id - )) - } - } else { - Err(format!( - "Endpoint {} security mode {} is invalid", - client_endpoint.url, client_endpoint.security_mode - )) - } - } else { - Err(format!( - "Endpoint {} security policy {} is invalid", - 
client_endpoint.url, client_endpoint.security_policy - )) - } - } -} diff --git a/lib/src/client/comms/mod.rs b/lib/src/client/comms/mod.rs deleted file mode 100644 index 8da159e92..000000000 --- a/lib/src/client/comms/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Client side communications - -mod transport; - -pub(crate) mod tcp_transport; diff --git a/lib/src/client/comms/tcp_transport.rs b/lib/src/client/comms/tcp_transport.rs deleted file mode 100644 index 9f7ade87e..000000000 --- a/lib/src/client/comms/tcp_transport.rs +++ /dev/null @@ -1,621 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! The OPC UA TCP transport client module. The transport is responsible for establishing a connection -//! with the server and processing requests. -//! -//! Internally this uses Tokio to process requests and responses supplied by the session via the -//! session state. 
-use std::{ - collections::HashMap, - net::{SocketAddr, ToSocketAddrs}, - result::Result, - sync::Arc, - thread, time, -}; - -use futures::StreamExt; -use tokio::{ - self, - io::{AsyncWriteExt, ReadHalf, WriteHalf}, - net::TcpStream, - sync::mpsc::UnboundedReceiver, -}; -use tokio_util::codec::FramedRead; - -use crate::core::{ - comms::{ - message_chunk_info::ChunkInfo, - message_writer::MessageWriter, - tcp_codec::{Message, TcpCodec}, - tcp_types::HelloMessage, - url::hostname_port_from_url, - }, - prelude::*, -}; -use crate::sync::*; -use crate::types::status_code::StatusCode; - -use crate::client::{ - callbacks::OnSessionClosed, - comms::transport::Transport, - message_queue::{self, MessageQueue}, - session::session_state::{ConnectionState, ConnectionStateMgr, SessionState}, -}; - -//todo move this struct to core module -#[derive(Debug)] -struct MessageChunkWithChunkInfo { - header: ChunkInfo, - data_with_header: Vec, -} - -struct ReadState { - pub state: ConnectionStateMgr, - pub secure_channel: Arc>, - pub message_queue: Arc>, - pub max_chunk_count: usize, - /// Last decoded sequence number - last_received_sequence_number: u32, - chunks: HashMap>, - pub framed_read: FramedRead, TcpCodec>, -} - -impl Drop for ReadState { - fn drop(&mut self) { - info!("ReadState has dropped"); - } -} - -impl ReadState { - fn new( - connection_state: ConnectionStateMgr, - secure_channel: Arc>, - message_queue: Arc>, - session_state: &SessionState, - framed_read: FramedRead, TcpCodec>, - ) -> Self { - ReadState { - secure_channel, - state: connection_state, - max_chunk_count: session_state.max_chunk_count(), - last_received_sequence_number: 0, - message_queue, - chunks: HashMap::new(), - framed_read, - } - } - fn turn_received_chunks_into_message( - &mut self, - chunks: &[MessageChunk], - ) -> Result { - // Validate that all chunks have incrementing sequence numbers and valid chunk types - let secure_channel = trace_read_lock!(self.secure_channel); - 
self.last_received_sequence_number = Chunker::validate_chunks( - self.last_received_sequence_number + 1, - &secure_channel, - chunks, - )?; - // Now decode - Chunker::decode(chunks, &secure_channel, None) - } - - fn process_chunk( - &mut self, - chunk: MessageChunk, - ) -> Result, StatusCode> { - // trace!("Got a chunk {:?}", chunk); - let chunk = { - let mut secure_channel = trace_write_lock!(self.secure_channel); - secure_channel.verify_and_remove_security(&chunk.data)? - }; - - let secure_channel = trace_read_lock!(self.secure_channel); - let chunk_info = chunk.chunk_info(&secure_channel)?; - drop(secure_channel); - let req_id = chunk_info.sequence_header.request_id; - - match chunk_info.message_header.is_final { - MessageIsFinalType::Intermediate => { - let chunks = self.chunks.entry(req_id).or_insert_with(Vec::new); - debug!( - "receive chunk intermediate {}:{}", - chunk_info.sequence_header.request_id, - chunk_info.sequence_header.sequence_number - ); - chunks.push(MessageChunkWithChunkInfo { - header: chunk_info, - data_with_header: chunk.data, - }); - let chunks_len = self.chunks.len(); - if self.max_chunk_count > 0 && chunks_len > self.max_chunk_count { - error!("too many chunks {}> {}", chunks_len, self.max_chunk_count); - // TODO this code should return an error to be safe - //remove first - let first_req_id = *self.chunks.iter().next().unwrap().0; - self.chunks.remove(&first_req_id); - } - return Ok(None); - } - MessageIsFinalType::FinalError => { - info!("Discarding chunk marked in as final error"); - self.chunks.remove(&chunk_info.sequence_header.request_id); - return Ok(None); - } - _ => { - // Drop through - } - } - - let chunks = self.chunks.entry(req_id).or_insert_with(Vec::new); - chunks.push(MessageChunkWithChunkInfo { - header: chunk_info, - data_with_header: chunk.data, - }); - let in_chunks = Self::merge_chunks(self.chunks.remove(&req_id).unwrap())?; - let message = self.turn_received_chunks_into_message(&in_chunks)?; - - Ok(Some(message)) - 
} - - fn merge_chunks( - mut chunks: Vec, - ) -> Result, StatusCode> { - if chunks.len() == 1 { - return Ok(vec![MessageChunk { - data: chunks.pop().unwrap().data_with_header, - }]); - } - chunks.sort_by(|a, b| { - a.header - .sequence_header - .sequence_number - .cmp(&b.header.sequence_header.sequence_number) - }); - let mut ret = Vec::with_capacity(chunks.len()); - //not start with 0 - let mut expect_sequence_number = chunks - .get(0) - .unwrap() - .header - .sequence_header - .sequence_number; - for c in chunks { - if c.header.sequence_header.sequence_number != expect_sequence_number { - info!( - "receive wrong chunk expect seq={},got={}", - expect_sequence_number, c.header.sequence_header.sequence_number - ); - continue; //may be duplicate chunk - } - expect_sequence_number += 1; - ret.push(MessageChunk { - data: c.data_with_header, - }); - } - Ok(ret) - } -} - -struct WriteState { - /// The url to connect to - pub secure_channel: Arc>, - pub message_queue: Arc>, - pub writer: WriteHalf, - /// The send buffer - pub send_buffer: MessageWriter, - pub receiver: UnboundedReceiver, -} - -impl Drop for WriteState { - fn drop(&mut self) { - info!("WriteState has dropped"); - } -} - -impl WriteState { - fn new( - secure_channel: Arc>, - message_queue: Arc>, - writer: WriteHalf, - session_state: &SessionState, - ) -> Self { - let receiver = { - let mut queue = trace_write_lock!(message_queue); - queue.clear(); - queue.make_request_channel() - }; - WriteState { - secure_channel, - send_buffer: MessageWriter::new( - session_state.send_buffer_size(), - session_state.max_message_size(), - session_state.max_chunk_count(), - ), - writer, - message_queue, - receiver, - } - } - /// Sends the supplied request asynchronously. The returned value is the request id for the - /// chunked message. Higher levels may or may not find it useful. 
- fn send_request(&mut self, request: SupportedMessage) -> Result { - let secure_channel = trace_read_lock!(self.secure_channel); - let request_id = self.send_buffer.next_request_id(); - self.send_buffer.write(request_id, request, &secure_channel) - } -} - -/// This is the OPC UA TCP client transport layer -/// -/// At its heart it is a tokio task that runs continuously reading and writing data from the connected -/// server. Requests are taken from the session state, responses are given to the session state. -/// -/// Reading and writing are split so they are independent of each other. -pub(crate) struct TcpTransport { - /// Session state - session_state: Arc>, - /// Secure channel information - secure_channel: Arc>, - /// Connection state - what the connection task is doing - connection_state: ConnectionStateMgr, - /// Message queue for requests / responses - message_queue: Arc>, - /// Tokio runtime - runtime: Arc>, -} - -impl Drop for TcpTransport { - fn drop(&mut self) { - info!("TcpTransport has dropped"); - } -} - -impl Transport for TcpTransport {} - -impl TcpTransport { - const WAIT_POLLING_TIMEOUT: u64 = 100; - - /// Create a new TCP transport layer for the session - pub fn new( - secure_channel: Arc>, - session_state: Arc>, - single_threaded_executor: bool, - ) -> TcpTransport { - let connection_state = { - let session_state = trace_read_lock!(session_state); - session_state.connection_state() - }; - - let message_queue = { - let session_state = trace_read_lock!(session_state); - session_state.message_queue.clone() - }; - - let runtime = { - let mut builder = if !single_threaded_executor { - tokio::runtime::Builder::new_multi_thread() - } else { - tokio::runtime::Builder::new_current_thread() - }; - - builder.enable_all().build().unwrap() - }; - - TcpTransport { - session_state, - secure_channel, - connection_state, - message_queue, - runtime: Arc::new(Mutex::new(runtime)), - } - } - - /// Connects the stream to the specified endpoint - pub fn 
connect(&self, endpoint_url: &str) -> Result<(), StatusCode> { - debug_assert!( - !self.is_connected(), - "Should not try to connect when already connected" - ); - let (host, port) = hostname_port_from_url( - endpoint_url, - crate::core::constants::DEFAULT_OPC_UA_SERVER_PORT, - )?; - - // Resolve the host name into a socket address - let addr = { - let addr = format!("{}:{}", host, port); - let addrs = addr.to_socket_addrs(); - if let Ok(mut addrs) = addrs { - // Take the first resolved ip addr for the hostname - if let Some(addr) = addrs.next() { - addr - } else { - error!("Invalid address {}, does not resolve to any socket", addr); - return Err(StatusCode::BadTcpEndpointUrlInvalid); - } - } else { - error!( - "Invalid address {}, cannot be parsed {:?}", - addr, - addrs.unwrap_err() - ); - return Err(StatusCode::BadTcpEndpointUrlInvalid); - } - }; - assert_eq!(addr.port(), port); - let endpoint_url = endpoint_url.to_string(); - - let (connection_state, session_state, secure_channel, message_queue) = ( - self.connection_state.clone(), - self.session_state.clone(), - self.secure_channel.clone(), - self.message_queue.clone(), - ); - - let (connection_status_sender, connection_status_receiver) = std::sync::mpsc::channel(); - let conn_task = Self::connection_task( - addr, - connection_state.clone(), - endpoint_url, - session_state.clone(), - secure_channel, - message_queue, - ); - let runtime = self.runtime.clone(); - thread::spawn(move || { - trace_lock!(runtime).block_on(async move { - let conn_result = conn_task.await; - let mut status = conn_result - .as_ref() - .err() - .copied() - .unwrap_or(StatusCode::Good); - let _ = connection_status_sender.send(if status.is_bad() { - Err(status) - } else { - Ok(()) - }); - if let Ok((read, write)) = conn_result { - status = Self::spawn_looping_tasks(read, write) - .await - .err() - .unwrap_or(StatusCode::Good); - } - connection_state.set_finished(status); - trace_write_lock!(session_state).on_session_closed(status); - }); - 
}); - connection_status_receiver - .recv() - .expect("channel should never be dropped here") - } - - /// Disconnects the stream from the server (if it is connected) - pub fn wait_for_disconnect(&self) { - debug!("Waiting for a disconnect"); - loop { - trace!("Still waiting for a disconnect"); - if self.connection_state.is_finished() { - debug!("Disconnected"); - break; - } - thread::sleep(time::Duration::from_millis(Self::WAIT_POLLING_TIMEOUT)) - } - } - - /// Tests if the transport is connected - pub fn is_connected(&self) -> bool { - self.connection_state.is_connected() - } - - /// This is the main connection task for a connection. - async fn connection_task( - addr: SocketAddr, - connection_state: ConnectionStateMgr, - endpoint_url: String, - session_state: Arc>, - secure_channel: Arc>, - message_queue: Arc>, - ) -> Result<(ReadState, WriteState), StatusCode> { - debug!( - "Creating a connection task to connect to {} with url {}", - addr, endpoint_url - ); - - connection_state.set_state(ConnectionState::Connecting); - let socket = TcpStream::connect(&addr).await.map_err(|err| { - error!("Could not connect to host {}, {:?}", addr, err); - StatusCode::BadCommunicationError - })?; - connection_state.set_state(ConnectionState::Connected); - let (reader, writer) = tokio::io::split(socket); - - let (hello, mut read_state, mut write_state) = { - let session_state = trace_read_lock!(session_state); - let hello = HelloMessage::new( - &endpoint_url, - session_state.send_buffer_size(), - session_state.receive_buffer_size(), - session_state.max_message_size(), - session_state.max_chunk_count(), - ); - let decoding_options = trace_read_lock!(secure_channel).decoding_options(); - let framed_read = FramedRead::new(reader, TcpCodec::new(decoding_options)); - let read_state = ReadState::new( - connection_state.clone(), - secure_channel.clone(), - message_queue.clone(), - &session_state, - framed_read, - ); - let write_state = WriteState::new( - secure_channel.clone(), - 
message_queue.clone(), - writer, - &session_state, - ); - (hello, read_state, write_state) - }; - - write_state - .writer - .write_all(&hello.encode_to_vec()) - .await - .map_err(|err| { - error!("Cannot send hello to server, err = {:?}", err); - StatusCode::BadCommunicationError - })?; - connection_state.set_state(ConnectionState::WaitingForAck); - match read_state.framed_read.next().await { - Some(Ok(Message::Acknowledge(ack))) => { - // TODO revise our sizes and other things according to the ACK - log::trace!("Received acknowledgement: {:?}", ack) - } - other => { - error!( - "Unexpected error while waiting for server ACK. Expected ACK, got {:?}", - other - ); - return Err(StatusCode::BadConnectionClosed); - } - }; - connection_state.set_state(ConnectionState::Processing); - Ok((read_state, write_state)) - } - - async fn write_bytes_task(write_state: &mut WriteState) -> Result<(), StatusCode> { - let bytes_to_write = write_state.send_buffer.bytes_to_write(); - write_state - .writer - .write_all(&bytes_to_write) - .await - .map_err(|e| { - error!("write bytes task failed: {}", e); - StatusCode::BadCommunicationError - }) - } - - async fn spawn_reading_task(mut read_state: ReadState) -> Result<(), StatusCode> { - // This is the main processing loop that receives and sends messages - trace!("Starting reading loop"); - while let Some(next_msg) = read_state.framed_read.next().await { - log::trace!("Reading loop received message: {:?}", next_msg); - match next_msg { - Ok(message) => { - let mut session_status_code = StatusCode::Good; - match message { - Message::Acknowledge(ack) => { - debug!("Reader got an unexpected ack {:?}", ack); - session_status_code = StatusCode::BadUnexpectedError; - } - Message::Chunk(chunk) => { - match read_state.process_chunk(chunk) { - Ok(response) => { - if let Some(response) = response { - // Store the response - let mut message_queue = - trace_write_lock!(read_state.message_queue); - message_queue.store_response(response); - } - } - 
Err(err) => session_status_code = err, - }; - } - Message::Error(error) => { - // TODO client should go into an error recovery state, dropping the connection and reestablishing it. - session_status_code = - if let Some(status_code) = StatusCode::from_u32(error.error) { - status_code - } else { - StatusCode::BadUnexpectedError - }; - error!( - "Expecting a chunk, got an error message {}", - session_status_code - ); - } - m => { - error!("Expected a recognized message, got {:?}", m); - break; - } - } - if session_status_code.is_bad() { - return Err(session_status_code); - } - } - Err(err) => { - error!("Read loop error {:?}", err); - return Err(StatusCode::BadConnectionClosed); - } - } - } - debug!( - "Read loop finished, connection state = {:?}", - read_state.state.state() - ); - Ok(()) - } - - async fn spawn_writing_task(mut write_state: WriteState) -> Result<(), StatusCode> { - // In writing, we wait on outgoing requests, encoding each and writing them out - trace!("Starting writing loop"); - while let Some(msg) = write_state.receiver.recv().await { - trace!("Writing loop received message: {:?}", msg); - match msg { - message_queue::Message::Quit => { - debug!("Writer received a quit"); - return Ok(()); - } - message_queue::Message::SupportedMessage(request) => { - trace!("Sending Request: {:?}", request); - let close_connection = - matches!(request, SupportedMessage::CloseSecureChannelRequest(_)); - if close_connection { - debug!("Writer is about to send a CloseSecureChannelRequest which means it should close in a moment"); - } - - // Write it to the outgoing buffer - let request_handle = request.request_handle(); - write_state.send_request(request)?; - // Indicate the request was processed - { - let mut message_queue = trace_write_lock!(write_state.message_queue); - message_queue.request_was_processed(request_handle); - } - Self::write_bytes_task(&mut write_state).await?; - if close_connection { - debug!("Writer is setting the connection state to 
finished(good)"); - return Ok(()); - } - } - }; - } - Ok(()) - } - - /// This is the main processing loop for the connection. It writes requests and reads responses - /// over the socket to the server. - async fn spawn_looping_tasks( - read_state: ReadState, - write_state: WriteState, - ) -> Result<(), StatusCode> { - log::trace!("Spawning read and write loops"); - // Spawn the reading task loop - let read_loop = Self::spawn_reading_task(read_state); - // Spawn the writing task loop - let write_loop = Self::spawn_writing_task(write_state); - tokio::select! { - status = read_loop => { - log::debug!("Closing connection because the read loop terminated"); - status - } - status = write_loop => { - log::debug!("Closing connection because the write loop terminated"); - status - } - } - // Both the read and write halves are dropped at this point, and the connection is closed - } -} diff --git a/lib/src/client/comms/transport.rs b/lib/src/client/comms/transport.rs deleted file mode 100644 index 37694b04f..000000000 --- a/lib/src/client/comms/transport.rs +++ /dev/null @@ -1,8 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -/// A trait common to all transport implementations -pub(crate) trait Transport { - // Common functions will go here -} diff --git a/lib/src/client/config.rs b/lib/src/client/config.rs index 7c8aeeb00..33dccc03f 100644 --- a/lib/src/client/config.rs +++ b/lib/src/client/config.rs @@ -9,6 +9,7 @@ use std::{ collections::BTreeMap, path::{Path, PathBuf}, str::FromStr, + time::Duration, }; use crate::{ @@ -17,7 +18,7 @@ use crate::{ types::{ApplicationType, MessageSecurityMode, UAString}, }; -use super::session_retry_policy::SessionRetryPolicy; +use super::retry::SessionRetryPolicy; pub const ANONYMOUS_USER_TOKEN_ID: &str = "ANONYMOUS"; @@ -138,71 +139,95 @@ impl ClientEndpoint { #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct DecodingOptions { /// Maximum size of a message chunk 
in bytes. 0 means no limit - pub max_message_size: usize, + pub(crate) max_message_size: usize, /// Maximum number of chunks in a message. 0 means no limit - pub max_chunk_count: usize, + pub(crate) max_chunk_count: usize, + /// Maximum size of each individual sent message chunk. + pub(crate) max_chunk_size: usize, + /// Maximum size of each received chunk. + pub(crate) max_incoming_chunk_size: usize, /// Maximum length in bytes (not chars!) of a string. 0 actually means 0, i.e. no string permitted - pub max_string_length: usize, + pub(crate) max_string_length: usize, /// Maximum length in bytes of a byte string. 0 actually means 0, i.e. no byte string permitted - pub max_byte_string_length: usize, + pub(crate) max_byte_string_length: usize, /// Maximum number of array elements. 0 actually means 0, i.e. no array permitted - pub max_array_length: usize, + pub(crate) max_array_length: usize, } #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct Performance { /// Ignore clock skew allows the client to make a successful connection to the server, even /// when the client and server clocks are out of sync. - pub ignore_clock_skew: bool, - /// Use a single-threaded executor. The default executor uses a thread pool with a worker - /// thread for each CPU core available on the system. - pub single_threaded_executor: bool, + pub(crate) ignore_clock_skew: bool, + /// Maximum number of monitored items per request when recreating subscriptions on session recreation. + pub(crate) recreate_monitored_items_chunk: usize, + /// Maximum number of inflight messages. 
+ pub(crate) max_inflight_messages: usize, } /// Client OPC UA configuration #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct ClientConfig { /// Name of the application that the client presents itself as to the server - pub application_name: String, + pub(crate) application_name: String, /// The application uri - pub application_uri: String, + pub(crate) application_uri: String, /// Product uri - pub product_uri: String, + pub(crate) product_uri: String, /// Autocreates public / private keypair if they don't exist. For testing/samples only /// since you do not have control of the values - pub create_sample_keypair: bool, + pub(crate) create_sample_keypair: bool, /// Custom certificate path, to be used instead of the default .der certificate path - pub certificate_path: Option, + pub(crate) certificate_path: Option, /// Custom private key path, to be used instead of the default private key path - pub private_key_path: Option, + pub(crate) private_key_path: Option, /// Auto trusts server certificates. For testing/samples only unless you're sure what you're /// doing. - pub trust_server_certs: bool, + pub(crate) trust_server_certs: bool, /// Verify server certificates. For testing/samples only unless you're sure what you're /// doing. 
- pub verify_server_certs: bool, + pub(crate) verify_server_certs: bool, /// PKI folder, either absolute or relative to executable - pub pki_dir: PathBuf, + pub(crate) pki_dir: PathBuf, /// Preferred locales - pub preferred_locales: Vec, + pub(crate) preferred_locales: Vec, /// Identifier of the default endpoint - pub default_endpoint: String, + pub(crate) default_endpoint: String, /// User tokens - pub user_tokens: BTreeMap, + pub(crate) user_tokens: BTreeMap, /// List of end points - pub endpoints: BTreeMap, + pub(crate) endpoints: BTreeMap, /// Decoding options used for serialization / deserialization - pub decoding_options: DecodingOptions, - /// Max retry limit -1, 0 or number - pub session_retry_limit: i32, - /// Retry interval in milliseconds - pub session_retry_interval: u32, - /// Session timeout period in milliseconds - pub session_timeout: u32, + pub(crate) decoding_options: DecodingOptions, + /// Maximum number of times to attempt to reconnect to the server before giving up. + /// -1 retries forever + pub(crate) session_retry_limit: i32, + + /// Initial delay for exponential backoff when reconnecting to the server. + pub(crate) session_retry_initial: Duration, + /// Max delay between retry attempts. + pub(crate) session_retry_max: Duration, + /// Interval between each keep-alive request sent to the server. + pub(crate) keep_alive_interval: Duration, + + /// Timeout for each request sent to the server. + pub(crate) request_timeout: Duration, + /// Timeout for publish requests, separate from normal timeout since + /// subscriptions are often more time sensitive. + pub(crate) publish_timeout: Duration, + /// Minimum publish interval. Setting this higher will make sure that subscriptions + /// publish together, which may reduce the number of publish requests if you have a lot of subscriptions. + pub(crate) min_publish_interval: Duration, + /// Maximum number of inflight publish requests before further requests are skipped. 
+ pub(crate) max_inflight_publish: usize, + + /// Requested session timeout in milliseconds + pub(crate) session_timeout: u32, + /// Client performance settings - pub performance: Performance, + pub(crate) performance: Performance, /// Session name - pub session_name: String, + pub(crate) session_name: String, } impl Config for ClientConfig { @@ -307,10 +332,7 @@ impl ClientConfig { /// The default PKI directory pub const PKI_DIR: &'static str = "pki"; - pub fn new(application_name: T, application_uri: T) -> Self - where - T: Into, - { + pub fn new(application_name: impl Into, application_uri: impl Into) -> Self { let mut pki_dir = std::env::current_dir().unwrap(); pki_dir.push(Self::PKI_DIR); @@ -330,7 +352,13 @@ impl ClientConfig { user_tokens: BTreeMap::new(), endpoints: BTreeMap::new(), session_retry_limit: SessionRetryPolicy::DEFAULT_RETRY_LIMIT as i32, - session_retry_interval: SessionRetryPolicy::DEFAULT_RETRY_INTERVAL_MS, + session_retry_initial: Duration::from_secs(1), + session_retry_max: Duration::from_secs(30), + keep_alive_interval: Duration::from_secs(10), + request_timeout: Duration::from_secs(60), + min_publish_interval: Duration::from_secs(1), + publish_timeout: Duration::from_secs(60), + max_inflight_publish: 2, session_timeout: 0, decoding_options: DecodingOptions { max_array_length: decoding_options.max_array_length, @@ -338,12 +366,178 @@ impl ClientConfig { max_byte_string_length: decoding_options.max_byte_string_length, max_chunk_count: decoding_options.max_chunk_count, max_message_size: decoding_options.max_message_size, + max_chunk_size: 65535, + max_incoming_chunk_size: 65535, }, performance: Performance { ignore_clock_skew: false, - single_threaded_executor: true, + recreate_monitored_items_chunk: 1000, + max_inflight_messages: 20, }, session_name: "Rust OPC UA Client".into(), } } } + +#[cfg(test)] +mod tests { + use std::{self, collections::BTreeMap, path::PathBuf}; + + use crate::client::ClientBuilder; + use crate::core::config::Config; 
+ use crate::crypto::SecurityPolicy; + use crate::types::*; + + use super::{ClientConfig, ClientEndpoint, ClientUserToken, ANONYMOUS_USER_TOKEN_ID}; + + fn make_test_file(filename: &str) -> PathBuf { + let mut path = std::env::temp_dir(); + path.push(filename); + path + } + + pub fn sample_builder() -> ClientBuilder { + ClientBuilder::new() + .application_name("OPC UA Sample Client") + .application_uri("urn:SampleClient") + .create_sample_keypair(true) + .certificate_path("own/cert.der") + .private_key_path("private/private.pem") + .trust_server_certs(true) + .pki_dir("./pki") + .endpoints(vec![ + ( + "sample_none", + ClientEndpoint { + url: String::from("opc.tcp://127.0.0.1:4855/"), + security_policy: String::from(SecurityPolicy::None.to_str()), + security_mode: String::from(MessageSecurityMode::None), + user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), + }, + ), + ( + "sample_basic128rsa15", + ClientEndpoint { + url: String::from("opc.tcp://127.0.0.1:4855/"), + security_policy: String::from(SecurityPolicy::Basic128Rsa15.to_str()), + security_mode: String::from(MessageSecurityMode::SignAndEncrypt), + user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), + }, + ), + ( + "sample_basic256", + ClientEndpoint { + url: String::from("opc.tcp://127.0.0.1:4855/"), + security_policy: String::from(SecurityPolicy::Basic256.to_str()), + security_mode: String::from(MessageSecurityMode::SignAndEncrypt), + user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), + }, + ), + ( + "sample_basic256sha256", + ClientEndpoint { + url: String::from("opc.tcp://127.0.0.1:4855/"), + security_policy: String::from(SecurityPolicy::Basic256Sha256.to_str()), + security_mode: String::from(MessageSecurityMode::SignAndEncrypt), + user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), + }, + ), + ]) + .default_endpoint("sample_none") + .user_token( + "sample_user", + ClientUserToken::user_pass("sample1", "sample1pwd"), + ) + .user_token( + "sample_user2", + ClientUserToken::user_pass("sample2", 
"sample2pwd"), + ) + } + + pub fn default_sample_config() -> ClientConfig { + sample_builder().config() + } + + #[test] + fn client_sample_config() { + // This test exists to create the samples/client.conf file + // This test only exists to dump a sample config + let config = default_sample_config(); + let mut path = std::env::current_dir().unwrap(); + path.push(".."); + path.push("samples"); + path.push("client.conf"); + println!("Path is {:?}", path); + + let saved = config.save(&path); + println!("Saved = {:?}", saved); + assert!(saved.is_ok()); + assert!(config.is_valid()); + } + + #[test] + fn client_config() { + let path = make_test_file("client_config.yaml"); + println!("Client path = {:?}", path); + let config = default_sample_config(); + let saved = config.save(&path); + println!("Saved = {:?}", saved); + assert!(config.save(&path).is_ok()); + if let Ok(config2) = ClientConfig::load(&path) { + assert_eq!(config, config2); + } else { + panic!("Cannot load config from file"); + } + } + + #[test] + fn client_invalid_security_policy_config() { + let mut config = default_sample_config(); + // Security policy is wrong + config.endpoints = BTreeMap::new(); + config.endpoints.insert( + String::from("sample_none"), + ClientEndpoint { + url: String::from("opc.tcp://127.0.0.1:4855"), + security_policy: String::from("http://blah"), + security_mode: String::from(MessageSecurityMode::None), + user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), + }, + ); + assert!(!config.is_valid()); + } + + #[test] + fn client_invalid_security_mode_config() { + let mut config = default_sample_config(); + // Message security mode is wrong + config.endpoints = BTreeMap::new(); + config.endpoints.insert( + String::from("sample_none"), + ClientEndpoint { + url: String::from("opc.tcp://127.0.0.1:4855"), + security_policy: String::from(SecurityPolicy::Basic128Rsa15.to_uri()), + security_mode: String::from("SingAndEncrypt"), + user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), + }, + ); + 
assert!(!config.is_valid()); + } + + #[test] + fn client_anonymous_user_tokens_id() { + let mut config = default_sample_config(); + // id anonymous is reserved + config.user_tokens = BTreeMap::new(); + config.user_tokens.insert( + String::from("ANONYMOUS"), + ClientUserToken { + user: String::new(), + password: Some(String::new()), + cert_path: None, + private_key_path: None, + }, + ); + assert!(!config.is_valid()); + } +} diff --git a/lib/src/client/message_queue.rs b/lib/src/client/message_queue.rs deleted file mode 100644 index 09118fc4d..000000000 --- a/lib/src/client/message_queue.rs +++ /dev/null @@ -1,144 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::{collections::HashMap, sync::mpsc::SyncSender}; - -use tokio::sync::mpsc::{self, UnboundedReceiver, UnboundedSender}; - -use crate::core::supported_message::SupportedMessage; - -pub(crate) struct MessageQueue { - /// The requests that are in-flight, defined by their request handle and optionally a sender that will be notified with the response. - /// Basically, the sent requests reside here until the response returns at which point the entry is removed. - /// If a response is received for which there is no entry, the response will be discarded. 
- inflight_requests: HashMap>>, - /// A map of incoming responses waiting to be processed - responses: HashMap, - /// This is the queue that messages will be sent onto the transport for sending - sender: Option>, -} - -#[derive(Debug)] -pub enum Message { - Quit, - SupportedMessage(SupportedMessage), -} - -impl MessageQueue { - pub fn new() -> MessageQueue { - MessageQueue { - inflight_requests: HashMap::new(), - responses: HashMap::new(), - sender: None, - } - } - - pub(crate) fn clear(&mut self) { - self.inflight_requests.clear(); - self.responses.clear(); - } - - // Creates the transmission queue that outgoing requests will be sent over - pub(crate) fn make_request_channel(&mut self) -> UnboundedReceiver { - let (tx, rx) = mpsc::unbounded_channel(); - self.sender = Some(tx.clone()); - rx - } - - pub(crate) fn request_was_processed(&mut self, request_handle: u32) { - debug!("Request {} was processed by the server", request_handle); - } - - fn send_message(&self, message: Message) -> bool { - let sender = self.sender.as_ref().expect( - "MessageQueue::send_message should never be called before make_request_channel", - ); - if sender.is_closed() { - error!("Send message will fail because sender has been closed"); - false - } else if let Err(err) = sender.send(message) { - debug!("Cannot send message to message receiver, error {}", err); - false - } else { - true - } - } - - /// Called by the session to add a request to be sent. The sender parameter - /// is supplied by synchronous callers to be notified the moment the response is received. - /// Async callers, e.g. publish requests can supply None. 
- pub(crate) fn add_request( - &mut self, - request: SupportedMessage, - sender: Option>, - ) { - let request_handle = request.request_handle(); - trace!("Sending request {:?} to be sent", request); - self.inflight_requests.insert(request_handle, sender); - let _ = self.send_message(Message::SupportedMessage(request)); - } - - pub(crate) fn quit(&self) { - debug!("Sending a quit to the message receiver"); - let _ = self.send_message(Message::Quit); - } - - /// Called when a session's request times out. This call allows the session state to remove - /// the request as pending and ignore any response that arrives for it. - pub(crate) fn request_has_timed_out(&mut self, request_handle: u32) { - info!( - "Request {} has timed out and any response will be ignored", - request_handle - ); - let _ = self.inflight_requests.remove(&request_handle); - } - - /// Called by the connection to store a response for the consumption of the session. - pub(crate) fn store_response(&mut self, response: SupportedMessage) { - // Remove corresponding request handle from inflight queue, add to responses - let request_handle = response.request_handle(); - trace!("Received response {:?}", response); - debug!("Response to Request {} has been stored", request_handle); - // Remove the inflight request - // This true / false is slightly clunky. 
- if let Some(sender) = self.inflight_requests.remove(&request_handle) { - if let Some(sender) = sender { - // Synchronous request - if let Err(e) = sender.send(response) { - error!( - "Cannot send a response to a synchronous request {} because send failed, error = {}", - request_handle, - e - ); - } - } else { - self.responses.insert(request_handle, response); - } - } else { - error!("A response with request handle {} doesn't belong to any request and will be ignored, inflight requests = {:?}, request = {:?}", request_handle, self.inflight_requests, response); - if let SupportedMessage::ServiceFault(response) = response { - error!( - "Unhandled response is a service fault, service result = {}", - response.response_header.service_result - ) - } - } - } - - /// Takes all pending asynchronous responses into a vector sorted oldest to latest and - /// returns them to the caller. - pub(crate) fn async_responses(&mut self) -> Vec { - // Gather up all request handles - let mut async_handles = self.responses.keys().copied().collect::>(); - - // Order them from oldest to latest (except if handles wrap) - async_handles.sort(); - - // Remove each item from the map and return to caller - async_handles - .iter() - .map(|k| self.responses.remove(k).unwrap()) - .collect() - } -} diff --git a/lib/src/client/mod.rs b/lib/src/client/mod.rs index 3fb757dfd..517f84de7 100644 --- a/lib/src/client/mod.rs +++ b/lib/src/client/mod.rs @@ -37,10 +37,15 @@ //! //! ```no_run //! use std::sync::Arc; -//! use opcua::client::prelude::*; -//! use opcua::sync::*; -//! -//! fn main() { +//! use std::time::Duration; +//! use opcua::client::{ClientBuilder, IdentityToken, Session, DataChangeCallback, MonitoredItem}; +//! use opcua::types::{ +//! EndpointDescription, MessageSecurityMode, UserTokenPolicy, StatusCode, +//! NodeId, TimestampsToReturn, MonitoredItemCreateRequest, DataValue +//! }; +//! +//! #[tokio::main] +//! async fn main() { //! let mut client = ClientBuilder::new() //! 
.application_name("My First Client") //! .application_uri("urn:MyFirstClient") @@ -53,34 +58,45 @@ //! // the endpoint url, security policy, message security mode and user token policy. //! let endpoint: EndpointDescription = ("opc.tcp://localhost:4855/", "None", MessageSecurityMode::None, UserTokenPolicy::anonymous()).into(); //! -//! // Create the session -//! let session = client.connect_to_endpoint(endpoint, IdentityToken::Anonymous).unwrap(); +//! // Create the session and event loop +//! let (session, event_loop) = client.new_session_from_endpoint(endpoint, IdentityToken::Anonymous).await.unwrap(); +//! let handle = event_loop.spawn(); +//! +//! session.wait_for_connection().await; //! //! // Create a subscription and monitored items -//! if subscribe_to_values(session.clone()).is_ok() { -//! let _ = Session::run(session); +//! if subscribe_to_values(&session).await.is_ok() { +//! handle.await.unwrap(); //! } else { //! println!("Error creating subscription"); //! } //! } //! -//! fn subscribe_to_values(session: Arc>) -> Result<(), StatusCode> { -//! let mut session = session.write(); +//! async fn subscribe_to_values(session: &Session) -> Result<(), StatusCode> { //! // Create a subscription polling every 2s with a callback -//! let subscription_id = session.create_subscription(2000.0, 10, 30, 0, 0, true, DataChangeCallback::new(|changed_monitored_items| { -//! println!("Data change from server:"); -//! changed_monitored_items.iter().for_each(|item| print_value(item)); -//! }))?; +//! let subscription_id = session.create_subscription( +//! Duration::from_secs(2), +//! 10, +//! 30, +//! 0, +//! 0, +//! true, +//! DataChangeCallback::new( +//! |value, monitored_item| { +//! println!("Data change from server:"); +//! print_value(value, monitored_item); +//! } +//! ) +//! ).await?; //! // Create some monitored items //! let items_to_create: Vec = ["v1", "v2", "v3", "v4"].iter() //! .map(|v| NodeId::new(2, *v).into()).collect(); -//! 
let _ = session.create_monitored_items(subscription_id, TimestampsToReturn::Both, &items_to_create)?; +//! let _ = session.create_monitored_items(subscription_id, TimestampsToReturn::Both, items_to_create).await?; //! Ok(()) //! } //! -//! fn print_value(item: &MonitoredItem) { +//! fn print_value(data_value: DataValue, item: &MonitoredItem) { //! let node_id = &item.item_to_monitor().node_id; -//! let data_value = item.last_value(); //! if let Some(ref value) = data_value.value { //! println!("Item \"{}\", Value = {:?}", node_id, value); //! } else { @@ -94,68 +110,29 @@ //! [`ClientBuilder`]: ./client_builder/struct.ClientBuilder.html //! [`Session`]: ./session/struct.Session.html -use crate::core::supported_message::SupportedMessage; -use crate::types::{response_header::ResponseHeader, status_code::StatusCode}; - -mod comms; -mod message_queue; -mod subscription; -mod subscription_state; - -// Use through prelude mod builder; -mod callbacks; -mod client; mod config; +mod retry; mod session; -mod session_retry_policy; +mod transport; -/// Process the service result, i.e. where the request "succeeded" but the response -/// contains a failure status code. 
-pub(crate) fn process_service_result(response_header: &ResponseHeader) -> Result<(), StatusCode> { - if response_header.service_result.is_bad() { - info!( - "Received a bad service result {} from the request", - response_header.service_result - ); - Err(response_header.service_result) - } else { - Ok(()) - } -} - -pub(crate) fn process_unexpected_response(response: SupportedMessage) -> StatusCode { - match response { - SupportedMessage::ServiceFault(service_fault) => { - error!( - "Received a service fault of {} for the request", - service_fault.response_header.service_result - ); - service_fault.response_header.service_result - } - _ => { - error!("Received an unexpected response to the request"); - StatusCode::BadUnknownResponse - } - } -} +use std::path::PathBuf; -pub mod prelude { - pub use crate::{ - core::prelude::*, - crypto::*, - types::{service_types::*, status_code::StatusCode}, - }; +pub use builder::ClientBuilder; +pub use config::{ClientConfig, ClientEndpoint, ClientUserToken, ANONYMOUS_USER_TOKEN_ID}; +pub use session::{ + Client, DataChangeCallback, EventCallback, MonitoredItem, OnSubscriptionNotification, Session, + SessionActivity, SessionConnectMode, SessionEventLoop, SessionPollResult, Subscription, + SubscriptionCallbacks, +}; +pub use transport::AsyncSecureChannel; - pub use crate::client::{ - builder::*, - callbacks::*, - client::*, - config::*, - session::{services::*, session::*}, - subscription::MonitoredItem, - }; +#[derive(Debug, Clone)] +pub enum IdentityToken { + /// Anonymous identity token + Anonymous, + /// User name and a password + UserName(String, String), + /// X5090 cert - a path to the cert.der, and private.pem + X509(PathBuf, PathBuf), } - -#[cfg(test)] -mod tests; diff --git a/lib/src/client/retry.rs b/lib/src/client/retry.rs new file mode 100644 index 000000000..ebb9fbe77 --- /dev/null +++ b/lib/src/client/retry.rs @@ -0,0 +1,137 @@ +use std::time::Duration; + +pub(crate) struct ExponentialBackoff { + max_sleep: Duration, + 
max_retries: Option, + current_sleep: Duration, + retry_count: u32, +} + +impl ExponentialBackoff { + pub fn new(max_sleep: Duration, max_retries: Option, initial_sleep: Duration) -> Self { + Self { + max_sleep, + max_retries, + current_sleep: initial_sleep, + retry_count: 0, + } + } +} + +impl Iterator for ExponentialBackoff { + type Item = Duration; + + fn next(&mut self) -> Option { + if self.max_retries.is_some_and(|max| max <= self.retry_count) { + return None; + } + + let next_sleep = self.current_sleep.clone(); + self.current_sleep = self.max_sleep.min(self.current_sleep * 2); + self.retry_count += 1; + + Some(next_sleep) + } +} + +#[derive(Debug, Clone)] +pub struct SessionRetryPolicy { + reconnect_max_sleep: Duration, + reconnect_retry_limit: Option, + reconnect_initial_sleep: Duration, +} + +impl Default for SessionRetryPolicy { + fn default() -> Self { + Self { + reconnect_max_sleep: Duration::from_millis(Self::DEFAULT_MAX_SLEEP_MS), + reconnect_retry_limit: Some(Self::DEFAULT_RETRY_LIMIT), + reconnect_initial_sleep: Duration::from_millis(Self::DEFAULT_INITIAL_SLEEP_MS), + } + } +} + +impl SessionRetryPolicy { + pub const DEFAULT_RETRY_LIMIT: u32 = 10; + pub const DEFAULT_INITIAL_SLEEP_MS: u64 = 500; + pub const DEFAULT_MAX_SLEEP_MS: u64 = 30000; + + pub fn new(max_sleep: Duration, retry_limit: Option, initial_sleep: Duration) -> Self { + Self { + reconnect_max_sleep: max_sleep, + reconnect_retry_limit: retry_limit, + reconnect_initial_sleep: initial_sleep, + } + } + + pub(crate) fn new_backoff(&self) -> ExponentialBackoff { + ExponentialBackoff::new( + self.reconnect_max_sleep, + self.reconnect_retry_limit, + self.reconnect_initial_sleep, + ) + } + + pub fn infinity(max_sleep: Duration, initial_sleep: Duration) -> Self { + Self { + reconnect_initial_sleep: initial_sleep, + reconnect_retry_limit: None, + reconnect_max_sleep: max_sleep, + } + } + + pub fn never() -> Self { + Self { + reconnect_retry_limit: Some(0), + ..Default::default() + } + } +} + 
+#[cfg(test)] +mod tests { + use std::time::Duration; + + use super::SessionRetryPolicy; + + #[test] + fn session_retry() { + let policy = SessionRetryPolicy::default(); + + let mut backoff = policy.new_backoff(); + + assert_eq!(Some(Duration::from_millis(500)), backoff.next()); + assert_eq!(Some(Duration::from_millis(1000)), backoff.next()); + assert_eq!(Some(Duration::from_millis(2000)), backoff.next()); + assert_eq!(Some(Duration::from_millis(4000)), backoff.next()); + assert_eq!(Some(Duration::from_millis(8000)), backoff.next()); + assert_eq!(Some(Duration::from_millis(16000)), backoff.next()); + assert_eq!(Some(Duration::from_millis(30000)), backoff.next()); + assert_eq!(Some(Duration::from_millis(30000)), backoff.next()); + assert_eq!(Some(Duration::from_millis(30000)), backoff.next()); + assert_eq!(Some(Duration::from_millis(30000)), backoff.next()); + assert_eq!(None, backoff.next()); + assert_eq!(None, backoff.next()); + } + + #[test] + fn session_retry_infinity() { + let policy = + SessionRetryPolicy::infinity(Duration::from_millis(3000), Duration::from_millis(500)); + + let mut backoff = policy.new_backoff(); + + for _ in 0..100 { + assert!(backoff.next().is_some()); + } + + assert_eq!(Some(Duration::from_millis(3000)), backoff.next()); + } + + #[test] + fn session_retry_never() { + let policy = SessionRetryPolicy::never(); + let mut backoff = policy.new_backoff(); + assert!(backoff.next().is_none()); + } +} diff --git a/lib/src/client/session/client.rs b/lib/src/client/session/client.rs new file mode 100644 index 000000000..fd11b279f --- /dev/null +++ b/lib/src/client/session/client.rs @@ -0,0 +1,779 @@ +use std::{path::PathBuf, str::FromStr, sync::Arc}; + +use chrono::Duration; +use tokio::{pin, select}; + +use crate::{ + client::{ + retry::SessionRetryPolicy, + transport::{tcp::TransportConfiguration, TransportPollResult}, + AsyncSecureChannel, ClientConfig, ClientEndpoint, IdentityToken, ANONYMOUS_USER_TOKEN_ID, + }, + core::{ + comms::url::{ + 
hostname_from_url, is_opc_ua_binary_url, is_valid_opc_ua_url, + server_url_from_endpoint_url, url_matches_except_host, url_with_replaced_hostname, + }, + config::Config, + supported_message::SupportedMessage, + }, + crypto::{CertificateStore, SecurityPolicy}, + sync::RwLock, + types::{ + ApplicationDescription, DecodingOptions, EndpointDescription, FindServersRequest, + GetEndpointsRequest, MessageSecurityMode, RegisterServerRequest, RegisteredServer, + StatusCode, + }, +}; + +use super::{ + process_service_result, process_unexpected_response, Session, SessionEventLoop, SessionInfo, +}; + +pub struct Client { + /// Client configuration + config: ClientConfig, + /// Certificate store is where certificates go. + certificate_store: Arc>, + /// The session retry policy for new sessions + session_retry_policy: SessionRetryPolicy, +} + +impl Client { + /// Create a new client from config. + /// + /// Note that this does not make any connection to the server. + /// + /// # Arguments + /// + /// * `config` - Client configuration object. + pub fn new(config: ClientConfig) -> Self { + let application_description = if config.create_sample_keypair { + Some(config.application_description()) + } else { + None + }; + + let (mut certificate_store, client_certificate, client_pkey) = + CertificateStore::new_with_x509_data( + &config.pki_dir, + false, + config.certificate_path.as_deref(), + config.private_key_path.as_deref(), + application_description, + ); + if client_certificate.is_none() || client_pkey.is_none() { + error!("Client is missing its application instance certificate and/or its private key. 
Encrypted endpoints will not function correctly.") + } + + // Clients may choose to skip additional server certificate validations + certificate_store.set_skip_verify_certs(!config.verify_server_certs); + + // Clients may choose to auto trust servers to save some messing around with rejected certs + certificate_store.set_trust_unknown_certs(config.trust_server_certs); + + // The session retry policy dictates how many times to retry if connection to the server goes down + // and on what interval + + let session_retry_policy = SessionRetryPolicy::new( + config.session_retry_max, + if config.session_retry_limit < 0 { + None + } else { + Some(config.session_retry_limit as u32) + }, + config.session_retry_initial, + ); + + Self { + config, + session_retry_policy, + certificate_store: Arc::new(RwLock::new(certificate_store)), + } + } + + /// Connects to a named endpoint that you have defined in the `ClientConfig` + /// and creates a [`Session`] for that endpoint. Note that `GetEndpoints` is first + /// called on the server and it is expected to support the endpoint you intend to connect to. + /// + /// # Returns + /// + /// * `Ok((Arc, SessionEventLoop))` - Session and event loop. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub async fn connect_to_endpoint_id( + &mut self, + endpoint_id: Option<&str>, + ) -> Result<(Arc, SessionEventLoop), StatusCode> { + // Ask the server associated with the default endpoint for its list of endpoints + let endpoints = match self.get_server_endpoints().await { + Err(status_code) => { + error!("Cannot get endpoints for server, error - {}", status_code); + return Err(status_code); + } + Ok(endpoints) => endpoints, + }; + + info!("Server has these endpoints:"); + endpoints.iter().for_each(|e| { + info!( + " {} - {:?} / {:?}", + e.endpoint_url, + SecurityPolicy::from_str(e.security_policy_uri.as_ref()).unwrap(), + e.security_mode + ) + }); + + // Create a session to an endpoint. 
If an endpoint id is specified use that + if let Some(endpoint_id) = endpoint_id { + self.new_session_from_id(endpoint_id, &endpoints) + } else { + self.new_session(&endpoints) + } + .map_err(|_| StatusCode::BadConfigurationError) + } + + /// Connects to an ad-hoc server endpoint description. + /// + /// This function returns both a reference to the session, and a `SessionEventLoop`. You must run and + /// poll the event loop in order to actually establish a connection. + /// + /// This method will not attempt to create a session on the server, that will only happen once you start polling + /// the session event loop. + /// + /// # Arguments + /// + /// * `endpoint` - Discovery endpoint, the client will first connect to this in order to get a list of the + /// available endpoints on the server. + /// * `user_identity_token` - Identity token to use for authentication. + /// + /// # Returns + /// + /// * `Ok((Arc, SessionEventLoop))` - Session and event loop. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub async fn new_session_from_endpoint( + &mut self, + endpoint: impl Into, + user_identity_token: IdentityToken, + ) -> Result<(Arc, SessionEventLoop), StatusCode> { + let endpoint = endpoint.into(); + + // Get the server endpoints + let server_url = endpoint.endpoint_url.as_ref(); + + let server_endpoints = self + .get_server_endpoints_from_url(server_url) + .await + .map_err(|status_code| { + error!("Cannot get endpoints for server, error - {}", status_code); + status_code + })?; + + // Find the server endpoint that matches the one desired + let security_policy = SecurityPolicy::from_str(endpoint.security_policy_uri.as_ref()) + .map_err(|_| StatusCode::BadSecurityPolicyRejected)?; + let server_endpoint = Self::find_matching_endpoint( + &server_endpoints, + endpoint.endpoint_url.as_ref(), + security_policy, + endpoint.security_mode, + ) + .ok_or(StatusCode::BadTcpEndpointUrlInvalid) + .map_err(|status_code| { + error!( + "Cannot find matching endpoint for {}", + endpoint.endpoint_url.as_ref() + ); + status_code + })?; + + Ok(self + .new_session_from_info(SessionInfo { + endpoint: server_endpoint, + user_identity_token, + preferred_locales: Vec::new(), + }) + .unwrap()) + } + + /// Connects to an a server directly using provided [`SessionInfo`]. + /// + /// This function returns both a reference to the session, and a `SessionEventLoop`. You must run and + /// poll the event loop in order to actually establish a connection. + /// + /// This method will not attempt to create a session on the server, that will only happen once you start polling + /// the session event loop. + /// + /// # Arguments + /// + /// * `session_info` - Session info for creating a new session. + /// + /// # Returns + /// + /// * `Ok((Arc, SessionEventLoop))` - Session and event loop. + /// * `Err(String)` - Endpoint is invalid. 
+ /// + pub fn new_session_from_info( + &mut self, + session_info: impl Into, + ) -> Result<(Arc, SessionEventLoop), String> { + let session_info = session_info.into(); + if !is_opc_ua_binary_url(session_info.endpoint.endpoint_url.as_ref()) { + Err(format!( + "Endpoint url {}, is not a valid / supported url", + session_info.endpoint.endpoint_url + )) + } else { + Ok(Session::new( + self.certificate_store.clone(), + session_info, + self.config.session_name.clone().into(), + self.config.application_description(), + self.session_retry_policy.clone(), + self.decoding_options(), + &self.config, + )) + } + } + + /// Creates a new [`AsyncSession`] using the default endpoint specified in the config. If + /// there is no default, or the endpoint does not exist, this function will return an error + /// + /// This function returns both a reference to the session, and a `SessionEventLoop`. You must run and + /// poll the event loop in order to actually establish a connection. + /// + /// This method will not attempt to create a session on the server, that will only happen once you start polling + /// the session event loop. + /// + /// # Arguments + /// + /// * `endpoints` - A list of [`EndpointDescription`] containing the endpoints available on the server. + /// + /// # Returns + /// + /// * `Ok((Arc, SessionEventLoop))` - Session and event loop. + /// * `Err(String)` - Endpoint is invalid. + /// + pub fn new_session( + &mut self, + endpoints: &[EndpointDescription], + ) -> Result<(Arc, SessionEventLoop), String> { + let endpoint = self.default_endpoint()?; + let session_info = self.session_info_for_endpoint(&endpoint, endpoints)?; + self.new_session_from_info(session_info) + } + + /// Creates a new [`AsyncSession`] using the named endpoint id. If there is no + /// endpoint of that id in the config, this function will return an error + /// + /// This function returns both a reference to the session, and a `SessionEventLoop`. 
You must run and + /// poll the event loop in order to actually establish a connection. + /// + /// This method will not attempt to create a session on the server, that will only happen once you start polling + /// the session event loop. + /// + /// # Arguments + /// + /// * `endpoint_id` - ID matching an endpoint defined in config. + /// * `endpoints` - List of endpoints available on the server. + /// + pub fn new_session_from_id( + &mut self, + endpoint_id: impl Into, + endpoints: &[EndpointDescription], + ) -> Result<(Arc, SessionEventLoop), String> { + let endpoint_id = endpoint_id.into(); + let endpoint = { + let endpoint = self.config.endpoints.get(&endpoint_id); + if endpoint.is_none() { + return Err(format!("Cannot find endpoint with id {}", endpoint_id)); + } + // This clone is an unfortunate workaround to a lifetime issue between the borrowed + // endpoint and the need to call the mutable new_session_from_endpoint() + endpoint.unwrap().clone() + }; + let session_info = self.session_info_for_endpoint(&endpoint, endpoints)?; + self.new_session_from_info(session_info) + } + + /// Creates a [`SessionInfo`](SessionInfo) information from the supplied client endpoint. 
+ fn session_info_for_endpoint( + &self, + client_endpoint: &ClientEndpoint, + endpoints: &[EndpointDescription], + ) -> Result { + // Enumerate endpoints looking for matching one + if let Ok(security_policy) = SecurityPolicy::from_str(&client_endpoint.security_policy) { + let security_mode = MessageSecurityMode::from(client_endpoint.security_mode.as_ref()); + if security_mode != MessageSecurityMode::Invalid { + let endpoint_url = client_endpoint.url.clone(); + // Now find a matching endpoint from those on the server + let endpoint = Self::find_matching_endpoint( + endpoints, + &endpoint_url, + security_policy, + security_mode, + ); + if endpoint.is_none() { + Err(format!("Endpoint {}, {:?} / {:?} does not match against any supplied by the server", endpoint_url, security_policy, security_mode)) + } else if let Some(user_identity_token) = + self.client_identity_token(client_endpoint.user_token_id.clone()) + { + info!( + "Creating a session for endpoint {}, {:?} / {:?}", + endpoint_url, security_policy, security_mode + ); + let preferred_locales = self.config.preferred_locales.clone(); + Ok(SessionInfo { + endpoint: endpoint.unwrap(), + user_identity_token, + preferred_locales, + }) + } else { + Err(format!( + "Endpoint {} user id cannot be found", + client_endpoint.user_token_id + )) + } + } else { + Err(format!( + "Endpoint {} security mode {} is invalid", + client_endpoint.url, client_endpoint.security_mode + )) + } + } else { + Err(format!( + "Endpoint {} security policy {} is invalid", + client_endpoint.url, client_endpoint.security_policy + )) + } + } + + /// Create a secure channel using the provided [`SessionInfo`]. + /// + /// This is used when creating temporary connections to the server, when creating a session, + /// [`AsyncSession`] manages its own channel. 
+ fn channel_from_session_info(&self, session_info: SessionInfo) -> AsyncSecureChannel { + AsyncSecureChannel::new( + self.certificate_store.clone(), + session_info, + self.session_retry_policy.clone(), + self.decoding_options(), + self.config.performance.ignore_clock_skew, + Arc::default(), + TransportConfiguration { + max_pending_incoming: 5, + max_inflight: self.config.performance.max_inflight_messages, + send_buffer_size: self.config.decoding_options.max_chunk_size, + recv_buffer_size: self.config.decoding_options.max_incoming_chunk_size, + max_message_size: self.config.decoding_options.max_message_size, + max_chunk_count: self.config.decoding_options.max_chunk_count, + }, + ) + } + + /// Returns an identity token corresponding to the matching user in the configuration. Or None + /// if there is no matching token. + fn client_identity_token(&self, user_token_id: impl Into) -> Option { + let user_token_id = user_token_id.into(); + if user_token_id == ANONYMOUS_USER_TOKEN_ID { + Some(IdentityToken::Anonymous) + } else { + let token = self.config.user_tokens.get(&user_token_id)?; + + if let Some(ref password) = token.password { + Some(IdentityToken::UserName( + token.user.clone(), + password.clone(), + )) + } else if let Some(ref cert_path) = token.cert_path { + token.private_key_path.as_ref().map(|private_key_path| { + IdentityToken::X509(PathBuf::from(cert_path), PathBuf::from(private_key_path)) + }) + } else { + None + } + } + } + + /// Gets the [`ClientEndpoint`] information for the default endpoint, as defined + /// by the configuration. If there is no default endpoint, this function will return an error. + /// + /// # Returns + /// + /// * `Ok(ClientEndpoint)` - The default endpoint set in config. + /// * `Err(String)` - No default endpoint could be found. 
+ pub fn default_endpoint(&self) -> Result { + let default_endpoint_id = self.config.default_endpoint.clone(); + if default_endpoint_id.is_empty() { + Err("No default endpoint has been specified".to_string()) + } else if let Some(endpoint) = self.config.endpoints.get(&default_endpoint_id) { + Ok(endpoint.clone()) + } else { + Err(format!( + "Cannot find default endpoint with id {}", + default_endpoint_id + )) + } + } + + /// Get the list of endpoints for the server at the configured default endpoint. + /// + /// # Returns + /// + /// * `Ok(Vec)` - A list of the available endpoints on the server. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + pub async fn get_server_endpoints(&self) -> Result, StatusCode> { + if let Ok(default_endpoint) = self.default_endpoint() { + if let Ok(server_url) = server_url_from_endpoint_url(&default_endpoint.url) { + self.get_server_endpoints_from_url(server_url).await + } else { + error!( + "Cannot create a server url from the specified endpoint url {}", + default_endpoint.url + ); + Err(StatusCode::BadUnexpectedError) + } + } else { + error!("There is no default endpoint, so cannot get endpoints"); + Err(StatusCode::BadUnexpectedError) + } + } + + fn decoding_options(&self) -> DecodingOptions { + let decoding_options = &self.config.decoding_options; + DecodingOptions { + max_chunk_count: decoding_options.max_chunk_count, + max_message_size: decoding_options.max_message_size, + max_string_length: decoding_options.max_string_length, + max_byte_string_length: decoding_options.max_byte_string_length, + max_array_length: decoding_options.max_array_length, + client_offset: Duration::zero(), + ..Default::default() + } + } + + async fn get_server_endpoints_inner( + &self, + endpoint: &EndpointDescription, + channel: &AsyncSecureChannel, + ) -> Result, StatusCode> { + let request = GetEndpointsRequest { + request_header: channel.make_request_header(self.config.request_timeout), + endpoint_url: 
endpoint.endpoint_url.clone(), + locale_ids: None, + profile_uris: None, + }; + // Send the message and wait for a response. + let response = channel.send(request, self.config.request_timeout).await?; + if let SupportedMessage::GetEndpointsResponse(response) = response { + process_service_result(&response.response_header)?; + match response.endpoints { + None => Ok(Vec::new()), + Some(endpoints) => Ok(endpoints), + } + } else { + Err(process_unexpected_response(response)) + } + } + + /// Get the list of endpoints for the server at the given URL. + /// + /// # Arguments + /// + /// * `server_url` - URL of the discovery server to get endpoints from. + /// + /// # Returns + /// + /// * `Ok(Vec)` - A list of the available endpoints on the server. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + pub async fn get_server_endpoints_from_url( + &self, + server_url: impl Into, + ) -> Result, StatusCode> { + let server_url = server_url.into(); + if !is_opc_ua_binary_url(&server_url) { + Err(StatusCode::BadTcpEndpointUrlInvalid) + } else { + let preferred_locales = Vec::new(); + // Most of these fields mean nothing when getting endpoints + let endpoint = EndpointDescription::from(server_url.as_ref()); + let session_info = SessionInfo { + endpoint: endpoint.clone(), + user_identity_token: IdentityToken::Anonymous, + preferred_locales, + }; + let channel = self.channel_from_session_info(session_info); + + let mut evt_loop = channel.connect().await?; + + let send_fut = self.get_server_endpoints_inner(&endpoint, &channel); + pin!(send_fut); + + let res = loop { + select! 
{ + r = evt_loop.poll() => { + if let TransportPollResult::Closed(e) = r { + return Err(e); + } + }, + res = &mut send_fut => break res + } + }; + + channel.close_channel().await; + + loop { + if matches!(evt_loop.poll().await, TransportPollResult::Closed(_)) { + break; + } + } + + res + } + } + + async fn find_servers_inner( + &self, + endpoint_url: String, + channel: &AsyncSecureChannel, + ) -> Result, StatusCode> { + let request = FindServersRequest { + request_header: channel.make_request_header(self.config.request_timeout), + endpoint_url: endpoint_url.into(), + locale_ids: None, + server_uris: None, + }; + + let response = channel.send(request, self.config.request_timeout).await?; + if let SupportedMessage::FindServersResponse(response) = response { + process_service_result(&response.response_header)?; + let servers = if let Some(servers) = response.servers { + servers + } else { + Vec::new() + }; + Ok(servers) + } else { + Err(process_unexpected_response(response)) + } + } + + /// Connects to a discovery server and asks the server for a list of + /// available servers' [`ApplicationDescription`]. + /// + /// # Arguments + /// + /// * `discovery_endpoint_url` - Discovery endpoint to connect to. + /// + /// # Returns + /// + /// * `Ok(Vec)` - List of descriptions for servers known to the discovery server. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ pub async fn find_servers( + &mut self, + discovery_endpoint_url: impl Into, + ) -> Result, StatusCode> { + let discovery_endpoint_url = discovery_endpoint_url.into(); + debug!("find_servers, {}", discovery_endpoint_url); + let endpoint = EndpointDescription::from(discovery_endpoint_url.as_ref()); + let session_info = SessionInfo { + endpoint: endpoint.clone(), + user_identity_token: IdentityToken::Anonymous, + preferred_locales: Vec::new(), + }; + let channel = self.channel_from_session_info(session_info); + + let mut evt_loop = channel.connect().await?; + + let send_fut = self.find_servers_inner(discovery_endpoint_url, &channel); + pin!(send_fut); + + let res = loop { + select! { + r = evt_loop.poll() => { + if let TransportPollResult::Closed(e) = r { + return Err(e); + } + }, + res = &mut send_fut => break res + } + }; + + channel.close_channel().await; + + loop { + if matches!(evt_loop.poll().await, TransportPollResult::Closed(_)) { + break; + } + } + + res + } + + /// Find an endpoint supplied from the list of endpoints that matches the input criteria. + /// + /// # Arguments + /// + /// * `endpoints` - List of available endpoints on the server. + /// * `endpoint_url` - Given endpoint URL. + /// * `security_policy` - Required security policy. + /// * `security_mode` - Required security mode. + /// + /// # Returns + /// + /// * `Some(EndpointDescription)` - Validated endpoint. + /// * `None` - No matching endpoint was found. 
+ pub fn find_matching_endpoint( + endpoints: &[EndpointDescription], + endpoint_url: &str, + security_policy: SecurityPolicy, + security_mode: MessageSecurityMode, + ) -> Option { + if security_policy == SecurityPolicy::Unknown { + panic!("Cannot match against unknown security policy"); + } + + let mut matching_endpoint = endpoints + .iter() + .find(|e| { + // Endpoint matches if the security mode, policy and url match + security_mode == e.security_mode + && security_policy == SecurityPolicy::from_uri(e.security_policy_uri.as_ref()) + && url_matches_except_host(endpoint_url, e.endpoint_url.as_ref()) + }) + .cloned()?; + + let hostname = hostname_from_url(endpoint_url).ok()?; + let new_endpoint_url = + url_with_replaced_hostname(matching_endpoint.endpoint_url.as_ref(), &hostname).ok()?; + + // Issue #16, #17 - the server may advertise an endpoint whose hostname is inaccessible + // to the client so substitute the advertised hostname with the one the client supplied. + matching_endpoint.endpoint_url = new_endpoint_url.into(); + Some(matching_endpoint) + } + + /// Determine if we recognize the security of this endpoint. + /// + /// # Arguments + /// + /// * `endpoint` - Endpoint to check. + /// + /// # Returns + /// + /// * `bool` - `true` if the endpoint is supported. 
+ pub fn is_supported_endpoint(&self, endpoint: &EndpointDescription) -> bool { + if let Ok(security_policy) = SecurityPolicy::from_str(endpoint.security_policy_uri.as_ref()) + { + !matches!(security_policy, SecurityPolicy::Unknown) + } else { + false + } + } + + async fn register_server_inner( + &self, + server: RegisteredServer, + channel: &AsyncSecureChannel, + ) -> Result<(), StatusCode> { + let request = RegisterServerRequest { + request_header: channel.make_request_header(self.config.request_timeout), + server, + }; + let response = channel.send(request, self.config.request_timeout).await?; + if let SupportedMessage::RegisterServerResponse(response) = response { + process_service_result(&response.response_header)?; + Ok(()) + } else { + Err(process_unexpected_response(response)) + } + } + + /// This function is used by servers that wish to register themselves with a discovery server. + /// i.e. one server is the client to another server. The server sends a [`RegisterServerRequest`] + /// to the discovery server to register itself. Servers are expected to re-register themselves periodically + /// with the discovery server, with a maximum of 10 minute intervals. + /// + /// See OPC UA Part 4 - Services 5.4.5 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `server` - The server to register + /// + /// # Returns + /// + /// * `Ok(())` - Success + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub async fn register_server( + &mut self, + discovery_endpoint_url: impl Into, + server: RegisteredServer, + ) -> Result<(), StatusCode> { + let discovery_endpoint_url = discovery_endpoint_url.into(); + if !is_valid_opc_ua_url(&discovery_endpoint_url) { + error!( + "Discovery endpoint url \"{}\" is not a valid OPC UA url", + discovery_endpoint_url + ); + return Err(StatusCode::BadTcpEndpointUrlInvalid); + } + + debug!("register_server({}, {:?}", discovery_endpoint_url, server); + let endpoints = self + .get_server_endpoints_from_url(discovery_endpoint_url.clone()) + .await?; + if endpoints.is_empty() { + return Err(StatusCode::BadUnexpectedError); + } + + let Some(endpoint) = endpoints + .iter() + .filter(|e| self.is_supported_endpoint(*e)) + .max_by(|a, b| a.security_level.cmp(&b.security_level)) + else { + error!("Cannot find an endpoint that we call register server on"); + return Err(StatusCode::BadUnexpectedError); + }; + + debug!( + "Registering this server via discovery endpoint {:?}", + endpoint + ); + + let session_info = SessionInfo { + endpoint: endpoint.clone(), + user_identity_token: IdentityToken::Anonymous, + preferred_locales: Vec::new(), + }; + let channel = self.channel_from_session_info(session_info); + + let mut evt_loop = channel.connect().await?; + + let send_fut = self.register_server_inner(server, &channel); + pin!(send_fut); + + let res = loop { + select! 
{ + r = evt_loop.poll() => { + if let TransportPollResult::Closed(e) = r { + return Err(e); + } + }, + res = &mut send_fut => break res + } + }; + + channel.close_channel().await; + + loop { + if matches!(evt_loop.poll().await, TransportPollResult::Closed(_)) { + break; + } + } + + res + } +} diff --git a/lib/src/client/session/connect.rs b/lib/src/client/session/connect.rs new file mode 100644 index 000000000..860f367d1 --- /dev/null +++ b/lib/src/client/session/connect.rs @@ -0,0 +1,111 @@ +use std::sync::Arc; + +use tokio::{pin, select}; + +use crate::{ + client::transport::{SecureChannelEventLoop, TransportPollResult}, + types::{NodeId, StatusCode}, +}; + +use super::Session; + +/// This struct manages the task of connecting to the server. +/// It will only make a single attempt, so whatever is calling it is responsible for retries. +pub(super) struct SessionConnector { + inner: Arc, +} + +/// When the session connects to the server, this describes +/// how that happened, whether a new session was created, or an old session was reactivated. +#[derive(Debug, Clone)] +pub enum SessionConnectMode { + /// A new session was created with session ID given by the inner [`NodeId`] + NewSession(NodeId), + /// An old session was reactivated with session ID given by the inner [`NodeId`] + ReactivatedSession(NodeId), +} + +impl SessionConnector { + pub fn new(session: Arc) -> Self { + Self { inner: session } + } + + pub async fn try_connect( + &self, + ) -> Result<(SecureChannelEventLoop, SessionConnectMode), StatusCode> { + self.connect_and_activate().await + } + + async fn connect_and_activate( + &self, + ) -> Result<(SecureChannelEventLoop, SessionConnectMode), StatusCode> { + let mut event_loop = self.inner.channel.connect_no_retry().await?; + + let activate_fut = self.ensure_and_activate_session(); + pin!(activate_fut); + + let res = loop { + select! 
{ + r = event_loop.poll() => { + if let TransportPollResult::Closed(c) = r { + return Err(c); + } + }, + r = &mut activate_fut => break r, + } + }; + + let id = match res { + Ok(id) => id, + Err(e) => { + self.inner.channel.close_channel().await; + + loop { + if matches!(event_loop.poll().await, TransportPollResult::Closed(_)) { + break; + } + } + + return Err(e); + } + }; + + drop(activate_fut); + + Ok((event_loop, id)) + } + + async fn ensure_and_activate_session(&self) -> Result { + let should_create_session = self.inner.session_id.load().is_null(); + + if should_create_session { + self.inner.create_session().await?; + } + + let reconnect = match self.inner.activate_session().await { + Err(status_code) if !should_create_session => { + info!( + "Session activation failed on reconnect, error = {}, creating a new session", + status_code + ); + self.inner.reset(); + let id = self.inner.create_session().await?; + self.inner.activate_session().await?; + SessionConnectMode::NewSession(id) + } + Err(e) => return Err(e), + Ok(_) => { + let session_id = (**self.inner.session_id.load()).clone(); + if should_create_session { + SessionConnectMode::NewSession(session_id) + } else { + SessionConnectMode::ReactivatedSession(session_id) + } + } + }; + + self.inner.transfer_subscriptions_from_old_session().await; + + Ok(reconnect) + } +} diff --git a/lib/src/client/session/event_loop.rs b/lib/src/client/session/event_loop.rs new file mode 100644 index 000000000..75b1f3065 --- /dev/null +++ b/lib/src/client/session/event_loop.rs @@ -0,0 +1,333 @@ +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; + +use futures::{stream::BoxStream, Stream, StreamExt, TryStreamExt}; + +use crate::{ + client::{ + retry::{ExponentialBackoff, SessionRetryPolicy}, + session::{session_error, session_warn}, + transport::{SecureChannelEventLoop, TransportPollResult}, + }, + types::{AttributeId, QualifiedName, ReadValueId, StatusCode, TimestampsToReturn, VariableId}, +}; + +use super::{ + 
connect::{SessionConnectMode, SessionConnector}, + services::subscriptions::event_loop::{SubscriptionActivity, SubscriptionEventLoop}, + session::SessionState, + Session, +}; + +/// A list of possible events that happens while polling the session. +/// The client can use this list to monitor events such as disconnects, +/// publish failures, etc. +#[derive(Debug)] +#[non_exhaustive] +pub enum SessionPollResult { + /// A message was sent to or received from the server. + Transport(TransportPollResult), + /// Connection was lost with the inner [`StatusCode`]. + ConnectionLost(StatusCode), + /// Reconnecting to the server failed with the inner [`StatusCode`]. + ReconnectFailed(StatusCode), + /// Session was reconnected, the mode is given by the innner [`SessionConnectMode`] + Reconnected(SessionConnectMode), + /// The session performed some periodic activity. + SessionActivity(SessionActivity), + /// The session performed some subscription-related activity. + Subscription(SubscriptionActivity), + /// The session begins (re)connecting to the server. + BeginConnect, +} + +enum SessionEventLoopState { + Connected( + SecureChannelEventLoop, + BoxStream<'static, SessionActivity>, + BoxStream<'static, SubscriptionActivity>, + ), + Connecting(SessionConnector, ExponentialBackoff, Instant), + Disconnected, +} + +/// The session event loop drives the client. It must be polled for anything to happen at all. 
+#[must_use = "The session event loop must be started for the session to work"] +pub struct SessionEventLoop { + inner: Arc, + trigger_publish_recv: tokio::sync::watch::Receiver, + retry: SessionRetryPolicy, + keep_alive_interval: Duration, +} + +impl SessionEventLoop { + pub(crate) fn new( + inner: Arc, + retry: SessionRetryPolicy, + trigger_publish_recv: tokio::sync::watch::Receiver, + keep_alive_interval: Duration, + ) -> Self { + Self { + inner, + retry, + trigger_publish_recv, + keep_alive_interval, + } + } + + /// Convenience method for running the session event loop until completion, + /// this method will return once the session is closed manually, or + /// after it fails to reconnect. + /// + /// # Returns + /// + /// * `StatusCode` - [Status code](StatusCode) indicating how the session terminated. + pub async fn run(self) -> StatusCode { + let stream = self.enter(); + tokio::pin!(stream); + loop { + let r = stream.try_next().await; + + match r { + Ok(None) => break StatusCode::Good, + Err(e) => break e, + _ => (), + } + } + } + + /// Convenience method for running the session event loop until completion on a tokio task. + /// This method will return a [`JoinHandle`](tokio::task::JoinHandle) that will terminate + /// once the session is closed manually, or after it fails to reconnect. + /// + /// # Returns + /// + /// * `JoinHandle` - Handle to a tokio task wrapping the event loop. + pub fn spawn(self) -> tokio::task::JoinHandle { + tokio::task::spawn(self.run()) + } + + /// Start the event loop, returning a stream that must be polled until it is closed. + /// The stream will return `None` when the transport is closed manually, or + /// `Some(Err(StatusCode))` when the stream fails to reconnect after a loss of connection. + /// + /// It yields events from normal session operation, which can be used to take specific actions + /// based on changes to the session state. 
+ pub fn enter(self) -> impl Stream> { + futures::stream::try_unfold( + (self, SessionEventLoopState::Disconnected), + |(slf, state)| async move { + let (res, state) = match state { + SessionEventLoopState::Connected(mut c, mut activity, mut subscriptions) => { + tokio::select! { + r = c.poll() => { + if let TransportPollResult::Closed(code) = r { + session_warn!(slf.inner, "Transport disconnected: {code}"); + let _ = slf.inner.state_watch_tx.send(SessionState::Disconnected); + + if code.is_good() { + return Ok(None); + } + + Ok(( + SessionPollResult::ConnectionLost(code), + SessionEventLoopState::Disconnected, + )) + } else { + Ok(( + SessionPollResult::Transport(r), + SessionEventLoopState::Connected(c, activity, subscriptions), + )) + } + } + r = activity.next() => { + // Should never be null, fail out + let Some(r) = r else { + session_error!(slf.inner, "Session activity loop ended unexpectedly"); + return Err(StatusCode::BadUnexpectedError); + }; + + Ok(( + SessionPollResult::SessionActivity(r), + SessionEventLoopState::Connected(c, activity, subscriptions), + )) + } + r = subscriptions.next() => { + // Should never be null, fail out + let Some(r) = r else { + session_error!(slf.inner, "Subscription event loop ended unexpectedly"); + return Err(StatusCode::BadUnexpectedError); + }; + + Ok(( + SessionPollResult::Subscription(r), + SessionEventLoopState::Connected(c, activity, subscriptions), + )) + } + } + } + SessionEventLoopState::Disconnected => { + let connector = SessionConnector::new(slf.inner.clone()); + + let _ = slf.inner.state_watch_tx.send(SessionState::Connecting); + + Ok(( + SessionPollResult::BeginConnect, + SessionEventLoopState::Connecting( + connector, + slf.retry.new_backoff(), + Instant::now(), + ), + )) + } + SessionEventLoopState::Connecting(connector, mut backoff, next_try) => { + tokio::time::sleep_until(next_try.into()).await; + + match connector.try_connect().await { + Ok((channel, result)) => { + let _ = 
slf.inner.state_watch_tx.send(SessionState::Connected); + Ok(( + SessionPollResult::Reconnected(result), + SessionEventLoopState::Connected( + channel, + SessionActivityLoop::new( + slf.inner.clone(), + slf.keep_alive_interval, + ) + .run() + .boxed(), + SubscriptionEventLoop::new( + slf.inner.clone(), + slf.trigger_publish_recv.clone(), + ) + .run() + .boxed(), + ), + )) + } + Err(e) => { + warn!("Failed to connect to server, status code: {e}"); + match backoff.next() { + Some(x) => Ok(( + SessionPollResult::ReconnectFailed(e), + SessionEventLoopState::Connecting( + connector, + backoff, + Instant::now() + x, + ), + )), + None => Err(e), + } + } + } + } + }?; + + Ok(Some((res, (slf, state)))) + }, + ) + } +} + +/// Periodic activity performed by the session. +#[derive(Debug, Clone)] +pub enum SessionActivity { + /// A keep alive request was sent to the server and a response was received with a successful state. + KeepAliveSucceeded, + /// A keep alive request was sent to the server, but it failed or the server was in an invalid state. + KeepAliveFailed(StatusCode), +} + +enum SessionTickEvent { + KeepAlive, +} + +struct SessionIntervals { + keep_alive: tokio::time::Interval, +} + +impl SessionIntervals { + pub fn new(keep_alive_interval: Duration) -> Self { + let mut keep_alive = tokio::time::interval(keep_alive_interval); + keep_alive.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + Self { keep_alive } + } + + pub async fn next(&mut self) -> SessionTickEvent { + tokio::select! 
{ + _ = self.keep_alive.tick() => SessionTickEvent::KeepAlive + } + } +} + +struct SessionActivityLoop { + inner: Arc, + tick_gen: SessionIntervals, +} + +impl SessionActivityLoop { + pub fn new(inner: Arc, keep_alive_interval: Duration) -> Self { + Self { + inner, + tick_gen: SessionIntervals::new(keep_alive_interval), + } + } + + pub fn run(self) -> impl Stream { + futures::stream::unfold(self, |mut slf| async move { + match slf.tick_gen.next().await { + SessionTickEvent::KeepAlive => { + let res = slf + .inner + .read( + &[ReadValueId { + node_id: VariableId::Server_ServerStatus_State.into(), + attribute_id: AttributeId::Value as u32, + index_range: Default::default(), + data_encoding: QualifiedName::null(), + }], + TimestampsToReturn::Server, + 1f64, + ) + .await; + + let value = match res.map(|r| r.into_iter().next()) { + Ok(Some(dv)) => dv, + // Should not be possible, this would be a bug in the server, assume everything + // is terrible. + Ok(None) => { + return Some(( + SessionActivity::KeepAliveFailed(StatusCode::BadUnknownResponse), + slf, + )) + } + Err(e) => return Some((SessionActivity::KeepAliveFailed(e), slf)), + }; + + let Some(status): Option = value.value.and_then(|v| v.try_into().ok()) + else { + return Some(( + SessionActivity::KeepAliveFailed(StatusCode::BadUnknownResponse), + slf, + )); + }; + + match status { + // ServerState::Running + 0 => Some((SessionActivity::KeepAliveSucceeded, slf)), + s => { + warn!("Keep alive failed, non-running status code {s}"); + Some(( + SessionActivity::KeepAliveFailed(StatusCode::BadServerHalted), + slf, + )) + } + } + } + } + }) + } +} diff --git a/lib/src/client/session/mod.rs b/lib/src/client/session/mod.rs index 0402856d2..d539c7ad7 100644 --- a/lib/src/client/session/mod.rs +++ b/lib/src/client/session/mod.rs @@ -1,31 +1,119 @@ -pub mod services; -pub mod session; -pub mod session_state; +mod client; +mod connect; +mod event_loop; +mod services; +mod session; +/// Information about the server endpoint, 
security policy, security mode and user identity that the session will +/// will use to establish a connection. +#[derive(Debug, Clone)] +pub struct SessionInfo { + /// The endpoint + pub endpoint: EndpointDescription, + /// User identity token + pub user_identity_token: IdentityToken, + /// Preferred language locales + pub preferred_locales: Vec, +} + +impl From for SessionInfo { + fn from(value: EndpointDescription) -> Self { + Self { + endpoint: value, + user_identity_token: IdentityToken::Anonymous, + preferred_locales: Vec::new(), + } + } +} + +impl From<(EndpointDescription, IdentityToken)> for SessionInfo { + fn from(value: (EndpointDescription, IdentityToken)) -> Self { + Self { + endpoint: value.0, + user_identity_token: value.1, + preferred_locales: Vec::new(), + } + } +} + +pub use client::Client; +pub use connect::SessionConnectMode; +pub use event_loop::{SessionActivity, SessionEventLoop, SessionPollResult}; +pub use services::subscriptions::{ + DataChangeCallback, EventCallback, MonitoredItem, OnSubscriptionNotification, Subscription, + SubscriptionCallbacks, +}; +pub use session::Session; + +#[allow(unused)] macro_rules! session_warn { ($session: expr, $($arg:tt)*) => { - warn!("{} {}", $session.session_id(), format!($($arg)*)); + warn!("session:{} {}", $session.session_id(), format!($($arg)*)); } } +#[allow(unused)] pub(crate) use session_warn; +#[allow(unused)] macro_rules! session_error { ($session: expr, $($arg:tt)*) => { - error!("{} {}", $session.session_id(), format!($($arg)*)); + error!("session:{} {}", $session.session_id(), format!($($arg)*)); } } +#[allow(unused)] pub(crate) use session_error; +#[allow(unused)] macro_rules! session_debug { ($session: expr, $($arg:tt)*) => { - debug!("{} {}", $session.session_id(), format!($($arg)*)); + debug!("session:{} {}", $session.session_id(), format!($($arg)*)); } } +#[allow(unused)] pub(crate) use session_debug; +#[allow(unused)] macro_rules! 
session_trace { ($session: expr, $($arg:tt)*) => { - trace!("{} {}", $session.session_id(), format!($($arg)*)); + trace!("session:{} {}", $session.session_id(), format!($($arg)*)); } } +#[allow(unused)] pub(crate) use session_trace; + +use crate::{ + core::supported_message::SupportedMessage, + types::{EndpointDescription, ResponseHeader, StatusCode}, +}; + +use super::IdentityToken; + +/// Process the service result, i.e. where the request "succeeded" but the response +/// contains a failure status code. +pub(crate) fn process_service_result(response_header: &ResponseHeader) -> Result<(), StatusCode> { + if response_header.service_result.is_bad() { + info!( + "Received a bad service result {} from the request", + response_header.service_result + ); + Err(response_header.service_result) + } else { + Ok(()) + } +} + +pub(crate) fn process_unexpected_response(response: SupportedMessage) -> StatusCode { + match response { + SupportedMessage::ServiceFault(service_fault) => { + error!( + "Received a service fault of {} for the request", + service_fault.response_header.service_result + ); + service_fault.response_header.service_result + } + _ => { + error!("Received an unexpected response to the request"); + StatusCode::BadUnknownResponse + } + } +} diff --git a/lib/src/client/session/services.rs b/lib/src/client/session/services.rs deleted file mode 100644 index 4d62a6817..000000000 --- a/lib/src/client/session/services.rs +++ /dev/null @@ -1,914 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::{convert::TryFrom, sync::mpsc::SyncSender}; - -use crate::{ - client::callbacks::OnSubscriptionNotification, - core::supported_message::SupportedMessage, - types::{ - node_ids::{MethodId, ObjectId}, - status_code::StatusCode, - *, - }, -}; - -/// Enumeration used with Session::history_read() -pub enum HistoryReadAction { - ReadEventDetails(ReadEventDetails), - ReadRawModifiedDetails(ReadRawModifiedDetails), - 
ReadProcessedDetails(ReadProcessedDetails), - ReadAtTimeDetails(ReadAtTimeDetails), -} - -impl From for ExtensionObject { - fn from(action: HistoryReadAction) -> Self { - match action { - HistoryReadAction::ReadEventDetails(v) => { - Self::from_encodable(ObjectId::ReadEventDetails_Encoding_DefaultBinary, &v) - } - HistoryReadAction::ReadRawModifiedDetails(v) => { - Self::from_encodable(ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary, &v) - } - HistoryReadAction::ReadProcessedDetails(v) => { - Self::from_encodable(ObjectId::ReadProcessedDetails_Encoding_DefaultBinary, &v) - } - HistoryReadAction::ReadAtTimeDetails(v) => { - Self::from_encodable(ObjectId::ReadAtTimeDetails_Encoding_DefaultBinary, &v) - } - } - } -} - -/// Enumeration used with Session::history_update() -pub enum HistoryUpdateAction { - UpdateDataDetails(UpdateDataDetails), - UpdateStructureDataDetails(UpdateStructureDataDetails), - UpdateEventDetails(UpdateEventDetails), - DeleteRawModifiedDetails(DeleteRawModifiedDetails), - DeleteAtTimeDetails(DeleteAtTimeDetails), - DeleteEventDetails(DeleteEventDetails), -} - -impl From<&HistoryUpdateAction> for ExtensionObject { - fn from(action: &HistoryUpdateAction) -> Self { - match action { - HistoryUpdateAction::UpdateDataDetails(v) => { - Self::from_encodable(ObjectId::UpdateDataDetails_Encoding_DefaultBinary, v) - } - HistoryUpdateAction::UpdateStructureDataDetails(v) => Self::from_encodable( - ObjectId::UpdateStructureDataDetails_Encoding_DefaultBinary, - v, - ), - HistoryUpdateAction::UpdateEventDetails(v) => { - Self::from_encodable(ObjectId::UpdateEventDetails_Encoding_DefaultBinary, v) - } - HistoryUpdateAction::DeleteRawModifiedDetails(v) => { - Self::from_encodable(ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary, v) - } - HistoryUpdateAction::DeleteAtTimeDetails(v) => { - Self::from_encodable(ObjectId::DeleteAtTimeDetails_Encoding_DefaultBinary, v) - } - HistoryUpdateAction::DeleteEventDetails(v) => { - 
Self::from_encodable(ObjectId::DeleteEventDetails_Encoding_DefaultBinary, v) - } - } - } -} - -pub trait Service { - fn make_request_header(&self) -> RequestHeader; - - /// Synchronously sends a request. The return value is the response to the request - fn send_request(&self, request: T) -> Result - where - T: Into; - - /// Asynchronously sends a request. The return value is the request handle of the request - fn async_send_request( - &self, - request: T, - sender: Option>, - ) -> Result - where - T: Into; -} - -/// Discovery Service set -pub trait DiscoveryService: Service { - /// Sends a [`FindServersRequest`] to the server denoted by the discovery url. - /// - /// See OPC UA Part 4 - Services 5.4.2 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `endpoint_url` - The network address that the Client used to access the Discovery Endpoint. - /// - /// # Returns - /// - /// * `Ok(Vec)` - A list of [`ApplicationDescription`] that meet criteria specified in the request. - /// * `Err(StatusCode)` - Request failed, status code is the reason for failure - /// - /// [`FindServersRequest`]: ./struct.FindServersRequest.html - /// [`ApplicationDescription`]: ./struct.ApplicationDescription.html - /// - fn find_servers(&self, endpoint_url: T) -> Result, StatusCode> - where - T: Into; - - /// Obtain the list of endpoints supported by the server by sending it a [`GetEndpointsRequest`]. - /// - /// See OPC UA Part 4 - Services 5.4.4 for complete description of the service and error responses. - /// - /// # Returns - /// - /// * `Ok(Vec)` - A list of endpoints supported by the server - /// * `Err(StatusCode)` - Request failed, status code is the reason for failure - /// - /// [`GetEndpointsRequest`]: ./struct.GetEndpointsRequest.html - /// - fn get_endpoints(&self) -> Result, StatusCode>; - - /// This function is used by servers that wish to register themselves with a discovery server. - /// i.e. 
one server is the client to another server. The server sends a [`RegisterServerRequest`] - /// to the discovery server to register itself. Servers are expected to re-register themselves periodically - /// with the discovery server, with a maximum of 10 minute intervals. - /// - /// See OPC UA Part 4 - Services 5.4.5 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `server` - The server to register - /// - /// # Returns - /// - /// * `Ok(())` - Success - /// * `Err(StatusCode)` - Request failed, status code is the reason for failure - /// - /// [`RegisterServerRequest`]: ./struct.RegisterServerRequest.html - /// - fn register_server(&self, server: RegisteredServer) -> Result<(), StatusCode>; -} - -/// SecureChannel Service set -pub trait SecureChannelService: Service { - /// Sends an [`OpenSecureChannelRequest`] to the server - /// - /// - /// See OPC UA Part 4 - Services 5.5.2 for complete description of the service and error responses. - /// # Returns - /// - /// * `Ok(())` - Success - /// * `Err(StatusCode)` - Request failed, status code is the reason for failure - /// - /// [`OpenSecureChannelRequest`]: ./struct.OpenSecureChannelRequest.html - /// - fn open_secure_channel(&self) -> Result<(), StatusCode>; - - /// Sends a [`CloseSecureChannelRequest`] to the server which will cause the server to drop - /// the connection. - /// - /// See OPC UA Part 4 - Services 5.5.3 for complete description of the service and error responses. - /// - /// # Returns - /// - /// * `Ok(())` - Success - /// * `Err(StatusCode)` - Request failed, status code is the reason for failure - /// - /// [`CloseSecureChannelRequest`]: ./struct.CloseSecureChannelRequest.html - /// - fn close_secure_channel(&self) -> Result<(), StatusCode>; -} - -/// Session Service set -pub trait SessionService: Service { - /// Sends a [`CreateSessionRequest`] to the server, returning the session id of the created - /// session. 
Internally, the session will store the authentication token which is used for requests - /// subsequent to this call. - /// - /// See OPC UA Part 4 - Services 5.6.2 for complete description of the service and error responses. - /// - /// # Returns - /// - /// * `Ok(NodeId)` - Success, session id - /// * `Err(StatusCode)` - Request failed, status code is the reason for failure - /// - /// [`CreateSessionRequest`]: ./struct.CreateSessionRequest.html - /// - fn create_session(&self) -> Result; - - /// Sends an [`ActivateSessionRequest`] to the server to activate this session - /// - /// See OPC UA Part 4 - Services 5.6.3 for complete description of the service and error responses. - /// - /// # Returns - /// - /// * `Ok(())` - Success - /// * `Err(StatusCode)` - Request failed, status code is the reason for failure - /// - /// [`ActivateSessionRequest`]: ./struct.ActivateSessionRequest.html - /// - fn activate_session(&self) -> Result<(), StatusCode>; - - /// Cancels an outstanding service request by sending a [`CancelRequest`] to the server. - /// - /// See OPC UA Part 4 - Services 5.6.5 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `request_handle` - Handle to the outstanding request to be cancelled. - /// - /// # Returns - /// - /// * `Ok(u32)` - Success, number of cancelled requests - /// * `Err(StatusCode)` - Request failed, status code is the reason for failure - /// - /// [`CancelRequest`]: ./struct.CancelRequest.html - /// - fn cancel(&self, request_handle: IntegerId) -> Result; -} - -/// NodeManagement Service set - -pub trait NodeManagementService: Service { - /// Add nodes by sending a [`AddNodesRequest`] to the server. - /// - /// See OPC UA Part 4 - Services 5.7.2 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `nodes_to_add` - A list of [`AddNodesItem`] to be added to the server. 
- /// - /// # Returns - /// - /// * `Ok(Vec)` - A list of [`AddNodesResult`] corresponding to each add node operation. - /// * `Err(StatusCode)` - Status code reason for failure. - /// - /// [`AddNodesRequest`]: ./struct.AddNodesRequest.html - /// [`AddNodesItem`]: ./struct.AddNodesItem.html - /// [`AddNodesResult`]: ./struct.AddNodesResult.html - /// - fn add_nodes(&self, nodes_to_add: &[AddNodesItem]) -> Result, StatusCode>; - - /// Add references by sending a [`AddReferencesRequest`] to the server. - /// - /// See OPC UA Part 4 - Services 5.7.3 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `references_to_add` - A list of [`AddReferencesItem`] to be sent to the server. - /// - /// # Returns - /// - /// * `Ok(Vec)` - A list of `StatusCode` corresponding to each add reference operation. - /// * `Err(StatusCode)` - Status code reason for failure. - /// - /// [`AddReferencesRequest`]: ./struct.AddReferencesRequest.html - /// [`AddReferencesItem`]: ./struct.AddReferencesItem.html - /// - fn add_references( - &self, - references_to_add: &[AddReferencesItem], - ) -> Result, StatusCode>; - - /// Delete nodes by sending a [`DeleteNodesRequest`] to the server. - /// - /// See OPC UA Part 4 - Services 5.7.4 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `nodes_to_delete` - A list of [`DeleteNodesItem`] to be sent to the server. - /// - /// # Returns - /// - /// * `Ok(Vec)` - A list of `StatusCode` corresponding to each delete node operation. - /// * `Err(StatusCode)` - Status code reason for failure. - /// - /// [`DeleteNodesRequest`]: ./struct.DeleteNodesRequest.html - /// [`DeleteNodesItem`]: ./struct.DeleteNodesItem.html - /// - fn delete_nodes( - &self, - nodes_to_delete: &[DeleteNodesItem], - ) -> Result, StatusCode>; - - /// Delete references by sending a [`DeleteReferencesRequest`] to the server. 
- /// - /// See OPC UA Part 4 - Services 5.7.5 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `nodes_to_delete` - A list of [`DeleteReferencesItem`] to be sent to the server. - /// - /// # Returns - /// - /// * `Ok(Vec)` - A list of `StatusCode` corresponding to each delete node operation. - /// * `Err(StatusCode)` - Status code reason for failure. - /// - /// [`DeleteReferencesRequest`]: ./struct.DeleteReferencesRequest.html - /// [`DeleteReferencesItem`]: ./struct.DeleteReferencesItem.html - /// - fn delete_references( - &self, - references_to_delete: &[DeleteReferencesItem], - ) -> Result, StatusCode>; -} - -/// View Service set -pub trait ViewService: Service { - /// Discover the references to the specified nodes by sending a [`BrowseRequest`] to the server. - /// - /// See OPC UA Part 4 - Services 5.8.2 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `nodes_to_browse` - A list of [`BrowseDescription`] describing nodes to browse. - /// - /// # Returns - /// - /// * `Ok(Option)` - A list [`BrowseResult`] corresponding to each node to browse. A browse result - /// may contain a continuation point, for use with `browse_next()`. - /// * `Err(StatusCode)` - Request failed, status code is the reason for failure - /// - /// [`BrowseRequest`]: ./struct.BrowseRequest.html - /// [`BrowseDescription`]: ./struct.BrowseDescription.html - /// [`BrowseResult`]: ./struct.BrowseResult.html - /// - fn browse( - &self, - nodes_to_browse: &[BrowseDescription], - ) -> Result>, StatusCode>; - - /// Continue to discover references to nodes by sending continuation points in a [`BrowseNextRequest`] - /// to the server. This function may have to be called repeatedly to process the initial query. - /// - /// See OPC UA Part 4 - Services 5.8.3 for complete description of the service and error responses. 
- /// - /// # Arguments - /// - /// * `release_continuation_points` - Flag indicating if the continuation points should be released by the server - /// * `continuation_points` - A list of [`BrowseDescription`] continuation points - /// - /// # Returns - /// - /// * `Ok(Option)` - A list [`BrowseResult`] corresponding to each node to browse. A browse result - /// may contain a continuation point, for use with `browse_next()`. - /// * `Err(StatusCode)` - Request failed, status code is the reason for failure - /// - /// [`BrowseRequest`]: ./struct.BrowseRequest.html - /// [`BrowseNextRequest`]: ./struct.BrowseNextRequest.html - /// [`BrowseResult`]: ./struct.BrowseResult.html - /// - fn browse_next( - &self, - release_continuation_points: bool, - continuation_points: &[ByteString], - ) -> Result>, StatusCode>; - - /// Translate browse paths to NodeIds by sending a [`TranslateBrowsePathsToNodeIdsRequest`] request to the Server - /// Each [`BrowsePath`] is constructed of a starting node and a `RelativePath`. The specified starting node - /// identifies the node from which the RelativePath is based. The RelativePath contains a sequence of - /// ReferenceTypes and BrowseNames. - /// - /// See OPC UA Part 4 - Services 5.8.4 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `browse_paths` - A list of [`BrowsePath`] node + relative path for the server to look up - /// - /// # Returns - /// - /// * `Ok(Vec>)` - List of [`BrowsePathResult`] for the list of browse - /// paths. The size and order of the list matches the size and order of the `browse_paths` - /// parameter. 
- /// * `Err(StatusCode)` - Request failed, status code is the reason for failure - /// - /// [`TranslateBrowsePathsToNodeIdsRequest`]: ./struct.TranslateBrowsePathsToNodeIdsRequest.html - /// [`BrowsePath`]: ./struct.BrowsePath.html - /// [`BrowsePathResult`]: ./struct.BrowsePathResult.html - fn translate_browse_paths_to_node_ids( - &self, - browse_paths: &[BrowsePath], - ) -> Result, StatusCode>; - - /// Register nodes on the server by sending a [`RegisterNodesRequest`]. The purpose of this - /// call is server-dependent but allows a client to ask a server to create nodes which are - /// otherwise expensive to set up or maintain, e.g. nodes attached to hardware. - /// - /// See OPC UA Part 4 - Services 5.8.5 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `nodes_to_register` - A list of [`NodeId`] nodes for the server to register - /// - /// # Returns - /// - /// * `Ok(Vec)` - A list of [`NodeId`] corresponding to size and order of the input. The - /// server may return an alias for the input `NodeId` - /// * `Err(StatusCode)` - Request failed, status code is the reason for failure - /// - /// [`RegisterNodesRequest`]: ./struct.RegisterNodesRequest.html - /// [`NodeId`]: ./struct.NodeId.html - fn register_nodes(&self, nodes_to_register: &[NodeId]) -> Result, StatusCode>; - - /// Unregister nodes on the server by sending a [`UnregisterNodesRequest`]. This indicates to - /// the server that the client relinquishes any need for these nodes. The server will ignore - /// unregistered nodes. - /// - /// See OPC UA Part 4 - Services 5.8.5 for complete description of the service and error responses. 
- /// - /// # Arguments - /// - /// * `nodes_to_unregister` - A list of [`NodeId`] nodes for the server to unregister - /// - /// # Returns - /// - /// * `Ok(())` - Request succeeded, server ignores invalid nodes - /// * `Err(StatusCode)` - Request failed, status code is the reason for failure - /// - /// [`UnregisterNodesRequest`]: ./struct.UnregisterNodesRequest.html - /// [`NodeId`]: ./struct.NodeId.html - /// - fn unregister_nodes(&self, nodes_to_unregister: &[NodeId]) -> Result<(), StatusCode>; -} - -/// Attribute Service set -pub trait AttributeService: Service { - /// Reads the value of nodes by sending a [`ReadRequest`] to the server. - /// - /// See OPC UA Part 4 - Services 5.10.2 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `nodes_to_read` - A list of [`ReadValueId`] to be read by the server. - /// * `timestamps_to_return` - The [`TimestampsToReturn`] for each node, Both, Server, Source or None - /// * `max_age` - The maximum age of value to read in milliseconds. Read the service description - /// for details. Basically it will attempt to read a value within the age range or - /// attempt to read a new value. If 0 the server will attempt to read a new value from the datasource. - /// If set to `i32::MAX` or greater, the server shall attempt to get a cached value. - /// - /// # Returns - /// - /// * `Ok(Vec)` - A list of [`DataValue`] corresponding to each read operation. - /// * `Err(StatusCode)` - Status code reason for failure. - /// - /// [`ReadRequest`]: ./struct.ReadRequest.html - /// [`ReadValueId`]: ./struct.ReadValueId.html - /// [`DataValue`]: ./struct.DataValue.html - /// - fn read( - &self, - nodes_to_read: &[ReadValueId], - timestamps_to_return: TimestampsToReturn, - max_age: f64, - ) -> Result, StatusCode>; - - /// Reads historical values or events of one or more nodes. 
The caller is expected to provide - /// a HistoryReadAction enum which must be one of the following: - /// - /// * HistoryReadAction::ReadEventDetails - /// * HistoryReadAction::ReadRawModifiedDetails - /// * HistoryReadAction::ReadProcessedDetails - /// * HistoryReadAction::ReadAtTimeDetails - /// - /// See OPC UA Part 4 - Services 5.10.3 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `history_read_details` - A history read operation encoded in an `ExtensionObject`. - /// * `timestamps_to_return` - Enumeration of which timestamps to return. - /// * `release_continuation_points` - Flag indicating whether to release the continuation point for the operation. - /// * `nodes_to_read` - The list of `HistoryReadValueId` of the nodes to apply the history read operation to. - /// - /// # Returns - /// - /// * `Ok(Vec)` - A list of `HistoryReadResult` results corresponding to history read operation. - /// * `Err(StatusCode)` - Status code reason for failure. - /// - fn history_read( - &self, - history_read_details: HistoryReadAction, - timestamps_to_return: TimestampsToReturn, - release_continuation_points: bool, - nodes_to_read: &[HistoryReadValueId], - ) -> Result, StatusCode>; - - /// Writes values to nodes by sending a [`WriteRequest`] to the server. Note that some servers may reject DataValues - /// containing source or server timestamps. - /// - /// See OPC UA Part 4 - Services 5.10.4 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `nodes_to_write` - A list of [`WriteValue`] to be sent to the server. - /// - /// # Returns - /// - /// * `Ok(Vec)` - A list of `StatusCode` results corresponding to each write operation. - /// * `Err(StatusCode)` - Status code reason for failure. 
- /// - /// [`WriteRequest`]: ./struct.WriteRequest.html - /// [`WriteValue`]: ./struct.WriteValue.html - /// - fn write(&self, nodes_to_write: &[WriteValue]) -> Result, StatusCode>; - - /// Updates historical values. The caller is expected to provide one or more history update operations - /// in a slice of HistoryUpdateAction enums which are one of the following: - /// - /// * UpdateDataDetails - /// * UpdateStructureDataDetails - /// * UpdateEventDetails - /// * DeleteRawModifiedDetails - /// * DeleteAtTimeDetails - /// * DeleteEventDetails - /// - /// See OPC UA Part 4 - Services 5.10.5 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `history_update_details` - A list of history update operations each encoded as an `ExtensionObject`. - /// - /// # Returns - /// - /// * `Ok(Vec)` - A list of `ClientHistoryUpdateResult` results corresponding to history update operation. - /// * `Err(StatusCode)` - Status code reason for failure. - /// - fn history_update( - &self, - history_update_details: &[HistoryUpdateAction], - ) -> Result, StatusCode>; -} - -/// Method Service set -pub trait MethodService: Service { - /// Calls a single method on an object on the server by sending a [`CallRequest`] to the server. - /// - /// See OPC UA Part 4 - Services 5.11.2 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `method` - The method to call. Note this function takes anything that can be turned into - /// a [`CallMethodRequest`] which includes a (`NodeId`, `NodeId`, `Option>`) - /// which refers to the object id, method id, and input arguments respectively. - /// - /// # Returns - /// - /// * `Ok(CallMethodResult)` - A `[CallMethodResult]` for the Method call. - /// * `Err(StatusCode)` - Status code reason for failure. 
- /// - /// [`CallRequest`]: ./struct.CallRequest.html - /// [`CallMethodRequest`]: ./struct.CallMethodRequest.html - /// [`CallMethodResult`]: ./struct.CallMethodResult.html - /// - fn call(&self, method: T) -> Result - where - T: Into; - - /// Calls GetMonitoredItems via call_method(), putting a sane interface on the input / output. - /// - /// # Arguments - /// - /// * `subscription_id` - Server allocated identifier for the subscription to return monitored items for. - /// - /// # Returns - /// - /// * `Ok((Vec, Vec))` - Result for call, consisting a list of (monitored_item_id, client_handle) - /// * `Err(StatusCode)` - Status code reason for failure. - /// - fn call_get_monitored_items( - &self, - subscription_id: u32, - ) -> Result<(Vec, Vec), StatusCode> { - let args = Some(vec![Variant::from(subscription_id)]); - let object_id: NodeId = ObjectId::Server.into(); - let method_id: NodeId = MethodId::Server_GetMonitoredItems.into(); - let request: CallMethodRequest = (object_id, method_id, args).into(); - let response = self.call(request)?; - if let Some(mut result) = response.output_arguments { - if result.len() == 2 { - let server_handles = >::try_from(&result.remove(0)) - .map_err(|_| StatusCode::BadUnexpectedError)?; - let client_handles = >::try_from(&result.remove(0)) - .map_err(|_| StatusCode::BadUnexpectedError)?; - Ok((server_handles, client_handles)) - } else { - error!("Expected a result with 2 args and didn't get it."); - Err(StatusCode::BadUnexpectedError) - } - } else { - error!("Expected a result and didn't get it."); - Err(StatusCode::BadUnexpectedError) - } - } -} - -//////////////////////////////////////////////////////////////////////////////////////////////// -// MonitoredItem Service set -//////////////////////////////////////////////////////////////////////////////////////////////// -pub trait MonitoredItemService: Service { - /// Creates monitored items on a subscription by sending a [`CreateMonitoredItemsRequest`] to the server. 
- /// - /// See OPC UA Part 4 - Services 5.12.2 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `subscription_id` - The Server-assigned identifier for the Subscription that will report Notifications for this MonitoredItem - /// * `timestamps_to_return` - An enumeration that specifies the timestamp Attributes to be transmitted for each MonitoredItem. - /// * `items_to_create` - A list of [`MonitoredItemCreateRequest`] to be created and assigned to the specified Subscription. - /// - /// # Returns - /// - /// * `Ok(Vec)` - A list of [`MonitoredItemCreateResult`] corresponding to the items to create. - /// The size and order of the list matches the size and order of the `items_to_create` request parameter. - /// * `Err(StatusCode)` - Status code reason for failure - /// - /// [`CreateMonitoredItemsRequest`]: ./struct.CreateMonitoredItemsRequest.html - /// [`MonitoredItemCreateRequest`]: ./struct.MonitoredItemCreateRequest.html - /// [`MonitoredItemCreateResult`]: ./struct.MonitoredItemCreateResult.html - /// - fn create_monitored_items( - &self, - subscription_id: u32, - timestamps_to_return: TimestampsToReturn, - items_to_create: &[MonitoredItemCreateRequest], - ) -> Result, StatusCode>; - - /// Modifies monitored items on a subscription by sending a [`ModifyMonitoredItemsRequest`] to the server. - /// - /// See OPC UA Part 4 - Services 5.12.3 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `subscription_id` - The Server-assigned identifier for the Subscription that will report Notifications for this MonitoredItem. - /// * `timestamps_to_return` - An enumeration that specifies the timestamp Attributes to be transmitted for each MonitoredItem. - /// * `items_to_modify` - The list of [`MonitoredItemModifyRequest`] to modify. - /// - /// # Returns - /// - /// * `Ok(Vec)` - A list of [`MonitoredItemModifyResult`] corresponding to the MonitoredItems to modify. 
- /// The size and order of the list matches the size and order of the `items_to_modify` request parameter. - /// * `Err(StatusCode)` - Status code reason for failure. - /// - /// [`ModifyMonitoredItemsRequest`]: ./struct.ModifyMonitoredItemsRequest.html - /// [`MonitoredItemModifyRequest`]: ./struct.MonitoredItemModifyRequest.html - /// [`MonitoredItemModifyResult`]: ./struct.MonitoredItemModifyResult.html - /// - fn modify_monitored_items( - &self, - subscription_id: u32, - timestamps_to_return: TimestampsToReturn, - items_to_modify: &[MonitoredItemModifyRequest], - ) -> Result, StatusCode>; - - /// Sets the monitoring mode on one or more monitored items by sending a [`SetMonitoringModeRequest`] - /// to the server. - /// - /// See OPC UA Part 4 - Services 5.12.4 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `subscription_id` - the subscription identifier containing the monitored items to be modified. - /// * `monitoring_mode` - the monitored mode to apply to the monitored items - /// * `monitored_item_ids` - the monitored items to be modified - /// - /// # Returns - /// - /// * `Ok(Vec)` - Individual result for each monitored item. - /// * `Err(StatusCode)` - Status code reason for failure. - /// - /// [`SetMonitoringModeRequest`]: ./struct.SetMonitoringModeRequest.html - /// - fn set_monitoring_mode( - &self, - subscription_id: u32, - monitoring_mode: MonitoringMode, - monitored_item_ids: &[u32], - ) -> Result, StatusCode>; - - /// Sets a monitored item so it becomes the trigger that causes other monitored items to send - /// change events in the same update. Sends a [`SetTriggeringRequest`] to the server. - /// Note that `items_to_remove` is applied before `items_to_add`. - /// - /// See OPC UA Part 4 - Services 5.12.5 for complete description of the service and error responses. 
- /// - /// # Arguments - /// - /// * `subscription_id` - the subscription identifier containing the monitored item to be used as the trigger. - /// * `monitored_item_id` - the monitored item that is the trigger. - /// * `links_to_add` - zero or more items to be added to the monitored item's triggering list. - /// * `items_to_remove` - zero or more items to be removed from the monitored item's triggering list. - /// - /// # Returns - /// - /// * `Ok((Option>, Option>))` - Individual result for each item added / removed for the SetTriggering call. - /// * `Err(StatusCode)` - Status code reason for failure. - /// - /// [`SetTriggeringRequest`]: ./struct.SetTriggeringRequest.html - /// - fn set_triggering( - &self, - subscription_id: u32, - triggering_item_id: u32, - links_to_add: &[u32], - links_to_remove: &[u32], - ) -> Result<(Option>, Option>), StatusCode>; - - /// Deletes monitored items from a subscription by sending a [`DeleteMonitoredItemsRequest`] to the server. - /// - /// See OPC UA Part 4 - Services 5.12.6 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `subscription_id` - The Server-assigned identifier for the Subscription that will report Notifications for this MonitoredItem. - /// * `items_to_delete` - List of Server-assigned ids for the MonitoredItems to be deleted. - /// - /// # Returns - /// - /// * `Ok(Vec)` - List of StatusCodes for the MonitoredItems to delete. The size and - /// order of the list matches the size and order of the `items_to_delete` request parameter. - /// * `Err(StatusCode)` - Status code reason for failure. 
- /// - /// [`DeleteMonitoredItemsRequest`]: ./struct.DeleteMonitoredItemsRequest.html - /// - fn delete_monitored_items( - &self, - subscription_id: u32, - items_to_delete: &[u32], - ) -> Result, StatusCode>; -} - -//////////////////////////////////////////////////////////////////////////////////////////////// -// Subscription Service set -//////////////////////////////////////////////////////////////////////////////////////////////// -pub trait SubscriptionService: Service { - /// Create a subscription by sending a [`CreateSubscriptionRequest`] to the server. - /// - /// See OPC UA Part 4 - Services 5.13.2 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `publishing_interval` - The requested publishing interval defines the cyclic rate that - /// the Subscription is being requested to return Notifications to the Client. This interval - /// is expressed in milliseconds. This interval is represented by the publishing timer in the - /// Subscription state table. The negotiated value for this parameter returned in the - /// response is used as the default sampling interval for MonitoredItems assigned to this - /// Subscription. If the requested value is 0 or negative, the server shall revise with the - /// fastest supported publishing interval in milliseconds. - /// * `lifetime_count` - Requested lifetime count. The lifetime count shall be a minimum of - /// three times the keep keep-alive count. When the publishing timer has expired this - /// number of times without a Publish request being available to send a NotificationMessage, - /// then the Subscription shall be deleted by the Server. - /// * `max_keep_alive_count` - Requested maximum keep-alive count. When the publishing timer has - /// expired this number of times without requiring any NotificationMessage to be sent, the - /// Subscription sends a keep-alive Message to the Client. The negotiated value for this - /// parameter is returned in the response. 
If the requested value is 0, the server shall - /// revise with the smallest supported keep-alive count. - /// * `max_notifications_per_publish` - The maximum number of notifications that the Client - /// wishes to receive in a single Publish response. A value of zero indicates that there is - /// no limit. The number of notifications per Publish is the sum of monitoredItems in - /// the DataChangeNotification and events in the EventNotificationList. - /// * `priority` - Indicates the relative priority of the Subscription. When more than one - /// Subscription needs to send Notifications, the Server should de-queue a Publish request - /// to the Subscription with the highest priority number. For Subscriptions with equal - /// priority the Server should de-queue Publish requests in a round-robin fashion. - /// A Client that does not require special priority settings should set this value to zero. - /// * `publishing_enabled` - A boolean parameter with the following values - `true` publishing - /// is enabled for the Subscription, `false`, publishing is disabled for the Subscription. - /// The value of this parameter does not affect the value of the monitoring mode Attribute of - /// MonitoredItems. - /// - /// # Returns - /// - /// * `Ok(u32)` - identifier for new subscription - /// * `Err(StatusCode)` - Status code reason for failure - /// - /// [`CreateSubscriptionRequest`]: ./struct.CreateSubscriptionRequest.html - /// - fn create_subscription( - &self, - publishing_interval: f64, - lifetime_count: u32, - max_keep_alive_count: u32, - max_notifications_per_publish: u32, - priority: u8, - publishing_enabled: bool, - callback: CB, - ) -> Result - where - CB: OnSubscriptionNotification + Send + Sync + 'static; - - /// Modifies a subscription by sending a [`ModifySubscriptionRequest`] to the server. - /// - /// See OPC UA Part 4 - Services 5.13.3 for complete description of the service and error responses. 
- /// - /// # Arguments - /// - /// * `subscription_id` - subscription identifier returned from `create_subscription`. - /// - /// See `create_subscription` for description of other parameters - /// - /// # Returns - /// - /// * `Ok(())` - Success - /// * `Err(StatusCode)` - Request failed, status code is the reason for failure - /// - /// [`ModifySubscriptionRequest`]: ./struct.ModifySubscriptionRequest.html - /// - fn modify_subscription( - &self, - subscription_id: u32, - publishing_interval: f64, - lifetime_count: u32, - max_keep_alive_count: u32, - max_notifications_per_publish: u32, - priority: u8, - ) -> Result<(), StatusCode>; - - /// Changes the publishing mode of subscriptions by sending a [`SetPublishingModeRequest`] to the server. - /// - /// See OPC UA Part 4 - Services 5.13.4 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `subscription_ids` - one or more subscription identifiers. - /// * `publishing_enabled` - A boolean parameter with the following values - `true` publishing - /// is enabled for the Subscriptions, `false`, publishing is disabled for the Subscriptions. - /// - /// # Returns - /// - /// * `Ok(Vec)` - Service return code for the action for each id, `Good` or `BadSubscriptionIdInvalid` - /// * `Err(StatusCode)` - Status code reason for failure - /// - /// [`SetPublishingModeRequest`]: ./struct.SetPublishingModeRequest.html - /// - fn set_publishing_mode( - &self, - subscription_ids: &[u32], - publishing_enabled: bool, - ) -> Result, StatusCode>; - - /// Transfers Subscriptions and their MonitoredItems from one Session to another. For example, - /// a Client may need to reopen a Session and then transfer its Subscriptions to that Session. - /// It may also be used by one Client to take over a Subscription from another Client by - /// transferring the Subscription to its Session. - /// - /// See OPC UA Part 4 - Services 5.13.7 for complete description of the service and error responses. 
- /// - /// * `subscription_ids` - one or more subscription identifiers. - /// * `send_initial_values` - A boolean parameter with the following values - `true` the first - /// publish response shall contain the current values of all monitored items in the subscription, - /// `false`, the first publish response shall contain only the value changes since the last - /// publish response was sent. - /// - /// # Returns - /// - /// * `Ok(Vec)` - The [`TransferResult`] for each transfer subscription. - /// * `Err(StatusCode)` - Status code reason for failure - /// - /// [`TransferSubscriptionsRequest`]: ./struct.TransferSubscriptionsRequest.html - /// [`TransferResult`]: ./struct.TransferResult.html - /// - fn transfer_subscriptions( - &self, - subscription_ids: &[u32], - send_initial_values: bool, - ) -> Result, StatusCode>; - - /// Deletes a subscription by sending a [`DeleteSubscriptionsRequest`] to the server. - /// - /// See OPC UA Part 4 - Services 5.13.8 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `subscription_id` - subscription identifier returned from `create_subscription`. - /// - /// # Returns - /// - /// * `Ok(StatusCode)` - Service return code for the delete action, `Good` or `BadSubscriptionIdInvalid` - /// * `Err(StatusCode)` - Status code reason for failure - /// - /// [`DeleteSubscriptionsRequest`]: ./struct.DeleteSubscriptionsRequest.html - /// - fn delete_subscription(&self, subscription_id: u32) -> Result; - - /// Deletes subscriptions by sending a [`DeleteSubscriptionsRequest`] to the server with the list - /// of subscriptions to delete. - /// - /// See OPC UA Part 4 - Services 5.13.8 for complete description of the service and error responses. - /// - /// # Arguments - /// - /// * `subscription_ids` - List of subscription identifiers to delete. 
- /// - /// # Returns - /// - /// * `Ok(Vec)` - List of result for delete action on each id, `Good` or `BadSubscriptionIdInvalid` - /// The size and order of the list matches the size and order of the input. - /// * `Err(StatusCode)` - Status code reason for failure - /// - /// [`DeleteSubscriptionsRequest`]: ./struct.DeleteSubscriptionsRequest.html - /// - fn delete_subscriptions(&self, subscription_ids: &[u32]) - -> Result, StatusCode>; -} diff --git a/lib/src/client/session/services/attributes.rs b/lib/src/client/session/services/attributes.rs new file mode 100644 index 000000000..2a49b7450 --- /dev/null +++ b/lib/src/client/session/services/attributes.rs @@ -0,0 +1,297 @@ +use crate::{ + client::{ + session::{ + process_service_result, process_unexpected_response, session_debug, session_error, + }, + Session, + }, + core::supported_message::SupportedMessage, + types::{ + DataValue, DeleteAtTimeDetails, DeleteEventDetails, DeleteRawModifiedDetails, + ExtensionObject, HistoryReadRequest, HistoryReadResult, HistoryReadValueId, + HistoryUpdateRequest, HistoryUpdateResult, ObjectId, ReadAtTimeDetails, ReadEventDetails, + ReadProcessedDetails, ReadRawModifiedDetails, ReadRequest, ReadValueId, StatusCode, + TimestampsToReturn, UpdateDataDetails, UpdateEventDetails, UpdateStructureDataDetails, + WriteRequest, WriteValue, + }, +}; + +/// Enumeration used with Session::history_read() +pub enum HistoryReadAction { + ReadEventDetails(ReadEventDetails), + ReadRawModifiedDetails(ReadRawModifiedDetails), + ReadProcessedDetails(ReadProcessedDetails), + ReadAtTimeDetails(ReadAtTimeDetails), +} + +impl From for ExtensionObject { + fn from(action: HistoryReadAction) -> Self { + match action { + HistoryReadAction::ReadEventDetails(v) => { + Self::from_encodable(ObjectId::ReadEventDetails_Encoding_DefaultBinary, &v) + } + HistoryReadAction::ReadRawModifiedDetails(v) => { + Self::from_encodable(ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary, &v) + } + 
HistoryReadAction::ReadProcessedDetails(v) => { + Self::from_encodable(ObjectId::ReadProcessedDetails_Encoding_DefaultBinary, &v) + } + HistoryReadAction::ReadAtTimeDetails(v) => { + Self::from_encodable(ObjectId::ReadAtTimeDetails_Encoding_DefaultBinary, &v) + } + } + } +} + +/// Enumeration used with Session::history_update() +pub enum HistoryUpdateAction { + UpdateDataDetails(UpdateDataDetails), + UpdateStructureDataDetails(UpdateStructureDataDetails), + UpdateEventDetails(UpdateEventDetails), + DeleteRawModifiedDetails(DeleteRawModifiedDetails), + DeleteAtTimeDetails(DeleteAtTimeDetails), + DeleteEventDetails(DeleteEventDetails), +} + +impl From<&HistoryUpdateAction> for ExtensionObject { + fn from(action: &HistoryUpdateAction) -> Self { + match action { + HistoryUpdateAction::UpdateDataDetails(v) => { + Self::from_encodable(ObjectId::UpdateDataDetails_Encoding_DefaultBinary, v) + } + HistoryUpdateAction::UpdateStructureDataDetails(v) => Self::from_encodable( + ObjectId::UpdateStructureDataDetails_Encoding_DefaultBinary, + v, + ), + HistoryUpdateAction::UpdateEventDetails(v) => { + Self::from_encodable(ObjectId::UpdateEventDetails_Encoding_DefaultBinary, v) + } + HistoryUpdateAction::DeleteRawModifiedDetails(v) => { + Self::from_encodable(ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary, v) + } + HistoryUpdateAction::DeleteAtTimeDetails(v) => { + Self::from_encodable(ObjectId::DeleteAtTimeDetails_Encoding_DefaultBinary, v) + } + HistoryUpdateAction::DeleteEventDetails(v) => { + Self::from_encodable(ObjectId::DeleteEventDetails_Encoding_DefaultBinary, v) + } + } + } +} + +impl Session { + /// Reads the value of nodes by sending a [`ReadRequest`] to the server. + /// + /// See OPC UA Part 4 - Services 5.10.2 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `nodes_to_read` - A list of [`ReadValueId`] to be read by the server. 
+ /// * `timestamps_to_return` - The [`TimestampsToReturn`] for each node, Both, Server, Source or None + /// * `max_age` - The maximum age of value to read in milliseconds. Read the service description + /// for details. Basically it will attempt to read a value within the age range or + /// attempt to read a new value. If 0 the server will attempt to read a new value from the datasource. + /// If set to `i32::MAX` or greater, the server shall attempt to get a cached value. + /// + /// # Returns + /// + /// * `Ok(Vec)` - A list of [`DataValue`] corresponding to each read operation. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub async fn read( + &self, + nodes_to_read: &[ReadValueId], + timestamps_to_return: TimestampsToReturn, + max_age: f64, + ) -> Result, StatusCode> { + if nodes_to_read.is_empty() { + // No subscriptions + session_error!(self, "read(), was not supplied with any nodes to read"); + Err(StatusCode::BadNothingToDo) + } else { + session_debug!(self, "read() requested to read nodes {:?}", nodes_to_read); + let request = ReadRequest { + request_header: self.make_request_header(), + max_age, + timestamps_to_return, + nodes_to_read: Some(nodes_to_read.to_vec()), + }; + let response = self.send(request).await?; + if let SupportedMessage::ReadResponse(response) = response { + session_debug!(self, "read(), success"); + process_service_result(&response.response_header)?; + let results = if let Some(results) = response.results { + results + } else { + Vec::new() + }; + Ok(results) + } else { + session_error!(self, "read() value failed"); + Err(process_unexpected_response(response)) + } + } + } + + /// Reads historical values or events of one or more nodes. 
The caller is expected to provide + /// a HistoryReadAction enum which must be one of the following: + /// + /// * [`ReadEventDetails`] + /// * [`ReadRawModifiedDetails`] + /// * [`ReadProcessedDetails`] + /// * [`ReadAtTimeDetails`] + /// + /// See OPC UA Part 4 - Services 5.10.3 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `history_read_details` - A history read operation. + /// * `timestamps_to_return` - Enumeration of which timestamps to return. + /// * `release_continuation_points` - Flag indicating whether to release the continuation point for the operation. + /// * `nodes_to_read` - The list of [`HistoryReadValueId`] of the nodes to apply the history read operation to. + /// + /// # Returns + /// + /// * `Ok(Vec)` - A list of [`HistoryReadResult`] results corresponding to history read operation. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub async fn history_read( + &self, + history_read_details: HistoryReadAction, + timestamps_to_return: TimestampsToReturn, + release_continuation_points: bool, + nodes_to_read: &[HistoryReadValueId], + ) -> Result, StatusCode> { + // Turn the enum into an extension object + let history_read_details = ExtensionObject::from(history_read_details); + let request = HistoryReadRequest { + request_header: self.make_request_header(), + history_read_details, + timestamps_to_return, + release_continuation_points, + nodes_to_read: if nodes_to_read.is_empty() { + None + } else { + Some(nodes_to_read.to_vec()) + }, + }; + session_debug!( + self, + "history_read() requested to read nodes {:?}", + nodes_to_read + ); + let response = self.send(request).await?; + if let SupportedMessage::HistoryReadResponse(response) = response { + session_debug!(self, "history_read(), success"); + process_service_result(&response.response_header)?; + let results = if let Some(results) = response.results { + results + } else { + Vec::new() + }; 
+ Ok(results) + } else { + session_error!(self, "history_read() value failed"); + Err(process_unexpected_response(response)) + } + } + + /// Writes values to nodes by sending a [`WriteRequest`] to the server. Note that some servers may reject DataValues + /// containing source or server timestamps. + /// + /// See OPC UA Part 4 - Services 5.10.4 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `nodes_to_write` - A list of [`WriteValue`] to be sent to the server. + /// + /// # Returns + /// + /// * `Ok(Vec)` - A list of [`StatusCode`] results corresponding to each write operation. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub async fn write( + &self, + nodes_to_write: &[WriteValue], + ) -> Result, StatusCode> { + if nodes_to_write.is_empty() { + // No subscriptions + session_error!(self, "write() was not supplied with any nodes to write"); + Err(StatusCode::BadNothingToDo) + } else { + let request = WriteRequest { + request_header: self.make_request_header(), + nodes_to_write: Some(nodes_to_write.to_vec()), + }; + let response = self.send(request).await?; + if let SupportedMessage::WriteResponse(response) = response { + session_debug!(self, "write(), success"); + process_service_result(&response.response_header)?; + Ok(response.results.unwrap_or_default()) + } else { + session_error!(self, "write() failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + } + + /// Updates historical values. 
The caller is expected to provide one or more history update operations + /// in a slice of HistoryUpdateAction enums which are one of the following: + /// + /// * [`UpdateDataDetails`] + /// * [`UpdateStructureDataDetails`] + /// * [`UpdateEventDetails`] + /// * [`DeleteRawModifiedDetails`] + /// * [`DeleteAtTimeDetails`] + /// * [`DeleteEventDetails`] + /// + /// See OPC UA Part 4 - Services 5.10.5 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `history_update_details` - A list of history update operations. + /// + /// # Returns + /// + /// * `Ok(Vec)` - A list of [`ClientHistoryUpdateResult`] results corresponding to history update operation. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub async fn history_update( + &self, + history_update_details: &[HistoryUpdateAction], + ) -> Result, StatusCode> { + if history_update_details.is_empty() { + // No subscriptions + session_error!( + self, + "history_update(), was not supplied with any detail to update" + ); + Err(StatusCode::BadNothingToDo) + } else { + // Turn the enums into ExtensionObjects + let history_update_details = history_update_details + .iter() + .map(|action| ExtensionObject::from(action)) + .collect::>(); + + let request = HistoryUpdateRequest { + request_header: self.make_request_header(), + history_update_details: Some(history_update_details.to_vec()), + }; + let response = self.send(request).await?; + if let SupportedMessage::HistoryUpdateResponse(response) = response { + session_debug!(self, "history_update(), success"); + process_service_result(&response.response_header)?; + let results = if let Some(results) = response.results { + results + } else { + Vec::new() + }; + Ok(results) + } else { + session_error!(self, "history_update() failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + } +} diff --git a/lib/src/client/session/services/method.rs 
b/lib/src/client/session/services/method.rs new file mode 100644 index 000000000..c9f13d328 --- /dev/null +++ b/lib/src/client/session/services/method.rs @@ -0,0 +1,100 @@ +use crate::{ + client::{ + session::{process_unexpected_response, session_debug, session_error}, + Session, + }, + core::supported_message::SupportedMessage, + types::{ + CallMethodRequest, CallMethodResult, CallRequest, MethodId, NodeId, ObjectId, StatusCode, + Variant, + }, +}; + +impl Session { + /// Calls a single method on an object on the server by sending a [`CallRequest`] to the server. + /// + /// See OPC UA Part 4 - Services 5.11.2 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `method` - The method to call. Note this function takes anything that can be turned into + /// a [`CallMethodRequest`] which includes a ([`NodeId`], [`NodeId`], `Option>`) tuple + /// which refers to the object id, method id, and input arguments respectively. + /// + /// # Returns + /// + /// * `Ok(CallMethodResult)` - A [`CallMethodResult`] for the Method call. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub async fn call( + &self, + method: impl Into, + ) -> Result { + session_debug!(self, "call()"); + let methods_to_call = Some(vec![method.into()]); + let request = CallRequest { + request_header: self.make_request_header(), + methods_to_call, + }; + let response = self.send(request).await?; + if let SupportedMessage::CallResponse(response) = response { + if let Some(mut results) = response.results { + if results.len() != 1 { + session_error!( + self, + "call(), expecting a result from the call to the server, got {} results", + results.len() + ); + Err(StatusCode::BadUnexpectedError) + } else { + Ok(results.remove(0)) + } + } else { + session_error!( + self, + "call(), expecting a result from the call to the server, got nothing" + ); + Err(StatusCode::BadUnexpectedError) + } + } else { + Err(process_unexpected_response(response)) + } + } + + /// Calls GetMonitoredItems via call_method(), putting a sane interface on the input / output. + /// + /// # Arguments + /// + /// * `subscription_id` - Server allocated identifier for the subscription to return monitored items for. + /// + /// # Returns + /// + /// * `Ok((Vec, Vec))` - Result for call, consisting a list of (monitored_item_id, client_handle) + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub async fn call_get_monitored_items( + &self, + subscription_id: u32, + ) -> Result<(Vec, Vec), StatusCode> { + let args = Some(vec![Variant::from(subscription_id)]); + let object_id: NodeId = ObjectId::Server.into(); + let method_id: NodeId = MethodId::Server_GetMonitoredItems.into(); + let request: CallMethodRequest = (object_id, method_id, args).into(); + let response = self.call(request).await?; + if let Some(mut result) = response.output_arguments { + if result.len() == 2 { + let server_handles = >::try_from(&result.remove(0)) + .map_err(|_| StatusCode::BadUnexpectedError)?; + let client_handles = >::try_from(&result.remove(0)) + .map_err(|_| StatusCode::BadUnexpectedError)?; + Ok((server_handles, client_handles)) + } else { + error!("Expected a result with 2 args and didn't get it."); + Err(StatusCode::BadUnexpectedError) + } + } else { + error!("Expected a result and didn't get it."); + Err(StatusCode::BadUnexpectedError) + } + } +} diff --git a/lib/src/client/session/services/mod.rs b/lib/src/client/session/services/mod.rs new file mode 100644 index 000000000..44f652dc4 --- /dev/null +++ b/lib/src/client/session/services/mod.rs @@ -0,0 +1,6 @@ +pub mod attributes; +pub mod method; +pub mod node_management; +pub mod session; +pub mod subscriptions; +pub mod view; diff --git a/lib/src/client/session/services/node_management.rs b/lib/src/client/session/services/node_management.rs new file mode 100644 index 000000000..ddfe14396 --- /dev/null +++ b/lib/src/client/session/services/node_management.rs @@ -0,0 +1,154 @@ +use crate::{ + client::{ + session::{process_service_result, process_unexpected_response, session_error}, + Session, + }, + core::supported_message::SupportedMessage, + types::{ + AddNodesItem, AddNodesRequest, AddNodesResult, AddReferencesItem, AddReferencesRequest, + DeleteNodesItem, DeleteNodesRequest, DeleteReferencesItem, DeleteReferencesRequest, + StatusCode, + }, +}; + +impl Session { + /// Add nodes by sending a [`AddNodesRequest`] 
to the server. + /// + /// See OPC UA Part 4 - Services 5.7.2 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `nodes_to_add` - A list of [`AddNodesItem`] to be added to the server. + /// + /// # Returns + /// + /// * `Ok(Vec)` - A list of [`AddNodesResult`] corresponding to each add node operation. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub async fn add_nodes( + &self, + nodes_to_add: &[AddNodesItem], + ) -> Result, StatusCode> { + if nodes_to_add.is_empty() { + session_error!(self, "add_nodes, called with no nodes to add"); + Err(StatusCode::BadNothingToDo) + } else { + let request = AddNodesRequest { + request_header: self.make_request_header(), + nodes_to_add: Some(nodes_to_add.to_vec()), + }; + let response = self.send(request).await?; + if let SupportedMessage::AddNodesResponse(response) = response { + Ok(response.results.unwrap()) + } else { + Err(process_unexpected_response(response)) + } + } + } + + /// Add references by sending a [`AddReferencesRequest`] to the server. + /// + /// See OPC UA Part 4 - Services 5.7.3 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `references_to_add` - A list of [`AddReferencesItem`] to be sent to the server. + /// + /// # Returns + /// + /// * `Ok(Vec)` - A list of `StatusCode` corresponding to each add reference operation. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub async fn add_references( + &self, + references_to_add: &[AddReferencesItem], + ) -> Result, StatusCode> { + if references_to_add.is_empty() { + session_error!(self, "add_references, called with no references to add"); + Err(StatusCode::BadNothingToDo) + } else { + let request = AddReferencesRequest { + request_header: self.make_request_header(), + references_to_add: Some(references_to_add.to_vec()), + }; + let response = self.send(request).await?; + if let SupportedMessage::AddReferencesResponse(response) = response { + process_service_result(&response.response_header)?; + Ok(response.results.unwrap()) + } else { + Err(process_unexpected_response(response)) + } + } + } + + /// Delete nodes by sending a [`DeleteNodesRequest`] to the server. + /// + /// See OPC UA Part 4 - Services 5.7.4 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `nodes_to_delete` - A list of [`DeleteNodesItem`] to be sent to the server. + /// + /// # Returns + /// + /// * `Ok(Vec)` - A list of `StatusCode` corresponding to each delete node operation. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub async fn delete_nodes( + &self, + nodes_to_delete: &[DeleteNodesItem], + ) -> Result, StatusCode> { + if nodes_to_delete.is_empty() { + session_error!(self, "delete_nodes, called with no nodes to delete"); + Err(StatusCode::BadNothingToDo) + } else { + let request = DeleteNodesRequest { + request_header: self.make_request_header(), + nodes_to_delete: Some(nodes_to_delete.to_vec()), + }; + let response = self.send(request).await?; + if let SupportedMessage::DeleteNodesResponse(response) = response { + Ok(response.results.unwrap()) + } else { + Err(process_unexpected_response(response)) + } + } + } + + /// Delete references by sending a [`DeleteReferencesRequest`] to the server. 
+ /// + /// See OPC UA Part 4 - Services 5.7.5 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `nodes_to_delete` - A list of [`DeleteReferencesItem`] to be sent to the server. + /// + /// # Returns + /// + /// * `Ok(Vec)` - A list of `StatusCode` corresponding to each delete node operation. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub async fn delete_references( + &self, + references_to_delete: &[DeleteReferencesItem], + ) -> Result, StatusCode> { + if references_to_delete.is_empty() { + session_error!( + self, + "delete_references, called with no references to delete" + ); + Err(StatusCode::BadNothingToDo) + } else { + let request = DeleteReferencesRequest { + request_header: self.make_request_header(), + references_to_delete: Some(references_to_delete.to_vec()), + }; + let response = self.send(request).await?; + if let SupportedMessage::DeleteReferencesResponse(response) = response { + Ok(response.results.unwrap()) + } else { + Err(process_unexpected_response(response)) + } + } + } +} diff --git a/lib/src/client/session/services/session.rs b/lib/src/client/session/services/session.rs new file mode 100644 index 000000000..8e09baaba --- /dev/null +++ b/lib/src/client/session/services/session.rs @@ -0,0 +1,370 @@ +use std::sync::Arc; + +use crypto::{certificate_store::CertificateStore, user_identity::make_user_name_identity_token}; + +use crate::{ + client::{ + session::{process_service_result, process_unexpected_response}, + IdentityToken, Session, + }, + core::{ + comms::{secure_channel::SecureChannel, url::hostname_from_url}, + supported_message::SupportedMessage, + }, + crypto::{self, SecurityPolicy}, + types::{ + ActivateSessionRequest, AnonymousIdentityToken, ByteString, CancelRequest, + CloseSessionRequest, CreateSessionRequest, ExtensionObject, IntegerId, NodeId, ObjectId, + SignatureData, StatusCode, UAString, UserNameIdentityToken, 
UserTokenPolicy, UserTokenType, + X509IdentityToken, + }, +}; + +impl Session { + /// Sends a [`CreateSessionRequest`] to the server, returning the session id of the created + /// session. Internally, the session will store the authentication token which is used for requests + /// subsequent to this call. + /// + /// See OPC UA Part 4 - Services 5.6.2 for complete description of the service and error responses. + /// + /// # Returns + /// + /// * `Ok(NodeId)` - Success, session id + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub(crate) async fn create_session(&self) -> Result { + let endpoint_url = self.session_info.endpoint.endpoint_url.clone(); + + let client_nonce = self.channel.client_nonce(); + let server_uri = UAString::null(); + let session_name = self.session_name.clone(); + + let (client_certificate, _) = { + let certificate_store = trace_write_lock!(self.certificate_store); + certificate_store.read_own_cert_and_pkey_optional() + }; + + let client_certificate = if let Some(ref client_certificate) = client_certificate { + client_certificate.as_byte_string() + } else { + ByteString::null() + }; + + let request = CreateSessionRequest { + request_header: self.make_request_header(), + client_description: self.application_description.clone(), + server_uri, + endpoint_url, + session_name, + client_nonce, + client_certificate, + requested_session_timeout: self.session_timeout, + max_response_message_size: 0, + }; + + let response = self.send(request).await?; + + if let SupportedMessage::CreateSessionResponse(response) = response { + process_service_result(&response.response_header)?; + + let session_id = { + self.session_id.store(Arc::new(response.session_id.clone())); + response.session_id.clone() + }; + self.auth_token + .store(Arc::new(response.authentication_token)); + + self.channel.update_from_created_session( + &response.server_nonce, + &response.server_certificate, + )?; + + let security_policy = 
self.channel.security_policy(); + + if security_policy != SecurityPolicy::None { + if let Ok(server_certificate) = + crypto::X509::from_byte_string(&response.server_certificate) + { + // Validate server certificate against hostname and application_uri + let hostname = + hostname_from_url(self.session_info.endpoint.endpoint_url.as_ref()) + .map_err(|_| StatusCode::BadUnexpectedError)?; + let application_uri = + self.session_info.endpoint.server.application_uri.as_ref(); + + let certificate_store = trace_write_lock!(self.certificate_store); + let result = certificate_store.validate_or_reject_application_instance_cert( + &server_certificate, + security_policy, + Some(&hostname), + Some(application_uri), + ); + if result.is_bad() { + return Err(result); + } + } else { + return Err(StatusCode::BadCertificateInvalid); + } + } + + Ok(session_id) + } else { + Err(process_unexpected_response(response)) + } + } + + /// Sends an [`ActivateSessionRequest`] to the server to activate this session + /// + /// See OPC UA Part 4 - Services 5.6.3 for complete description of the service and error responses. + /// + /// # Returns + /// + /// * `Ok(())` - Success + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub(crate) async fn activate_session(&self) -> Result<(), StatusCode> { + let secure_channel = trace_read_lock!(self.channel.secure_channel); + + let (user_identity_token, user_token_signature) = + self.user_identity_token(&secure_channel)?; + + let server_cert = secure_channel.remote_cert(); + let server_nonce = secure_channel.remote_nonce_as_byte_string(); + + drop(secure_channel); + + let locale_ids = if self.session_info.preferred_locales.is_empty() { + None + } else { + let locale_ids = self + .session_info + .preferred_locales + .iter() + .map(UAString::from) + .collect(); + Some(locale_ids) + }; + + let security_policy = self.channel.security_policy(); + let client_signature = match security_policy { + SecurityPolicy::None => SignatureData::null(), + _ => { + let (_, client_pkey) = { + let certificate_store = trace_write_lock!(self.certificate_store); + certificate_store.read_own_cert_and_pkey_optional() + }; + + // Create a signature data + if client_pkey.is_none() { + error!("Cannot create client signature - no pkey!"); + return Err(StatusCode::BadUnexpectedError); + } else if server_cert.is_none() { + error!("Cannot sign server certificate because server cert is null"); + return Err(StatusCode::BadUnexpectedError); + } else if server_nonce.is_empty() { + error!("Cannot sign server certificate because server nonce is empty"); + return Err(StatusCode::BadUnexpectedError); + } + + let server_cert = server_cert.unwrap().as_byte_string(); + let signing_key = client_pkey.as_ref().unwrap(); + crypto::create_signature_data( + signing_key, + security_policy, + &server_cert, + &server_nonce, + )? 
+ } + }; + + let request = ActivateSessionRequest { + request_header: self.make_request_header(), + client_signature, + client_software_certificates: None, + locale_ids, + user_identity_token, + user_token_signature, + }; + + let response = self.send(request).await?; + + if let SupportedMessage::ActivateSessionResponse(response) = response { + // trace!("ActivateSessionResponse = {:#?}", response); + process_service_result(&response.response_header)?; + Ok(()) + } else { + Err(process_unexpected_response(response)) + } + } + + /// Create a user identity token from config and the secure channel. + fn user_identity_token( + &self, + channel: &SecureChannel, + ) -> Result<(ExtensionObject, SignatureData), StatusCode> { + let server_cert = &channel.remote_cert(); + let server_nonce = &channel.remote_nonce(); + + let user_identity_token = &self.session_info.user_identity_token; + let user_token_type = match user_identity_token { + IdentityToken::Anonymous => UserTokenType::Anonymous, + IdentityToken::UserName(_, _) => UserTokenType::UserName, + IdentityToken::X509(_, _) => UserTokenType::Certificate, + }; + + let endpoint = &self.session_info.endpoint; + let policy = endpoint.find_policy(user_token_type); + + match policy { + None => { + error!( + "Cannot find user token type {:?} for this endpoint, cannot connect", + user_token_type + ); + Err(StatusCode::BadSecurityPolicyRejected) + } + Some(policy) => { + let security_policy = if policy.security_policy_uri.is_null() { + // Assume None + SecurityPolicy::None + } else { + SecurityPolicy::from_uri(policy.security_policy_uri.as_ref()) + }; + + if security_policy == SecurityPolicy::Unknown { + error!("Unknown security policy {}", policy.security_policy_uri); + return Err(StatusCode::BadSecurityPolicyRejected); + } + + match &user_identity_token { + IdentityToken::Anonymous => { + let identity_token = AnonymousIdentityToken { + policy_id: policy.policy_id.clone(), + }; + let identity_token = 
ExtensionObject::from_encodable( + ObjectId::AnonymousIdentityToken_Encoding_DefaultBinary, + &identity_token, + ); + Ok((identity_token, SignatureData::null())) + } + IdentityToken::UserName(user, pass) => { + let identity_token = + self.make_user_name_identity_token(channel, policy, user, pass)?; + let identity_token = ExtensionObject::from_encodable( + ObjectId::UserNameIdentityToken_Encoding_DefaultBinary, + &identity_token, + ); + Ok((identity_token, SignatureData::null())) + } + IdentityToken::X509(cert_path, private_key_path) => { + let Some(server_cert) = &server_cert else { + error!("Cannot create an X509IdentityToken because the remote server has no cert with which to create a signature"); + return Err(StatusCode::BadCertificateInvalid); + }; + let certificate_data = + CertificateStore::read_cert(cert_path).map_err(|e| { + error!( + "Certificate cannot be loaded from path {}, error = {}", + cert_path.to_str().unwrap(), + e + ); + StatusCode::BadSecurityPolicyRejected + })?; + let private_key = + CertificateStore::read_pkey(private_key_path).map_err(|e| { + error!( + "Private key cannot be loaded from path {}, error = {}", + private_key_path.to_str().unwrap(), + e + ); + StatusCode::BadSecurityPolicyRejected + })?; + let user_token_signature = crypto::create_signature_data( + &private_key, + security_policy, + &server_cert.as_byte_string(), + &ByteString::from(server_nonce), + )?; + + // Create identity token + let identity_token = X509IdentityToken { + policy_id: policy.policy_id.clone(), + certificate_data: certificate_data.as_byte_string(), + }; + let identity_token = ExtensionObject::from_encodable( + ObjectId::X509IdentityToken_Encoding_DefaultBinary, + &identity_token, + ); + + Ok((identity_token, user_token_signature)) + } + } + } + } + } + + /// Create a user name identity token. 
+ fn make_user_name_identity_token( + &self, + secure_channel: &SecureChannel, + user_token_policy: &UserTokenPolicy, + user: &str, + pass: &str, + ) -> Result { + let channel_security_policy = secure_channel.security_policy(); + let nonce = secure_channel.remote_nonce(); + let cert = secure_channel.remote_cert(); + make_user_name_identity_token( + channel_security_policy, + user_token_policy, + nonce, + &cert, + user, + pass, + ) + } + + /// Close the session by sending a [`CloseSessionRequest`] to the server. + /// + /// This is not accessible by users, they must instead call `disconnect` to properly close the session. + pub(crate) async fn close_session(&self) -> Result<(), StatusCode> { + let request = CloseSessionRequest { + delete_subscriptions: true, + request_header: self.make_request_header(), + }; + let response = self.send(request).await?; + if let SupportedMessage::CloseSessionResponse(_) = response { + Ok(()) + } else { + error!("close_session failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + + /// Cancels an outstanding service request by sending a [`CancelRequest`] to the server. + /// + /// See OPC UA Part 4 - Services 5.6.5 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `request_handle` - Handle to the outstanding request to be cancelled. + /// + /// # Returns + /// + /// * `Ok(u32)` - Success, number of cancelled requests + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub async fn cancel(&self, request_handle: IntegerId) -> Result { + let request = CancelRequest { + request_header: self.make_request_header(), + request_handle, + }; + let response = self.send(request).await?; + if let SupportedMessage::CancelResponse(response) = response { + process_service_result(&response.response_header)?; + Ok(response.cancel_count) + } else { + Err(process_unexpected_response(response)) + } + } +} diff --git a/lib/src/client/session/services/subscriptions/event_loop.rs b/lib/src/client/session/services/subscriptions/event_loop.rs new file mode 100644 index 000000000..96f039bfa --- /dev/null +++ b/lib/src/client/session/services/subscriptions/event_loop.rs @@ -0,0 +1,169 @@ +use std::{sync::Arc, time::Instant}; + +use futures::{future::Either, stream::FuturesUnordered, Future, Stream, StreamExt}; + +use crate::{ + client::{ + session::{session_debug, session_error}, + Session, + }, + types::StatusCode, +}; + +/// An event on the subscription event loop. +#[derive(Debug)] +pub enum SubscriptionActivity { + /// A publish request received a successful response. + Publish, + /// A publish request failed, either due to a timeout or an error. + /// The publish request will typically be retried. + PublishFailed(StatusCode), +} + +/// An event loop for running periodic subscription tasks. +/// +/// This handles publshing on a fixed interval, republishing failed requests, +/// and subscription keep-alive. +pub struct SubscriptionEventLoop { + session: Arc, + trigger_publish_recv: tokio::sync::watch::Receiver, + max_inflight_publish: usize, + last_external_trigger: Instant, + // This is true if the client has received a message BadTooManyPublishRequests + // and is waiting for a response before making further requests. + is_waiting_for_response: bool, +} + +impl SubscriptionEventLoop { + /// Create a new subscription event loop for `session` + /// + /// # Arguments + /// + /// * `session` - A shared reference to an [AsyncSession]. 
+ /// * `trigger_publish_recv` - A channel used to transmit external publish triggers. + /// This is used to trigger publish outside of the normal schedule, for example when + /// a new subscription is created. + pub fn new( + session: Arc, + trigger_publish_recv: tokio::sync::watch::Receiver, + ) -> Self { + let last_external_trigger = trigger_publish_recv.borrow().clone(); + Self { + max_inflight_publish: session.max_inflight_publish, + last_external_trigger, + trigger_publish_recv, + session, + is_waiting_for_response: false, + } + } + + /// Run the subscription event loop, returning a stream that produces + /// [SubscriptionActivity] enums, reporting activity to the session event loop. + pub fn run(self) -> impl Stream { + futures::stream::unfold( + (self, FuturesUnordered::new()), + |(mut slf, mut futures)| async move { + // Store the next publish time, or None if there are no active subscriptions. + let mut next = slf.session.next_publish_time(false); + let mut recv: tokio::sync::watch::Receiver = + slf.trigger_publish_recv.clone(); + + let res = loop { + // Future for the next periodic publish. We do not send publish requests if there + // are no active subscriptions. In this case, simply return the non-terminating + // future. + let next_tick_fut = if let Some(next) = next { + if slf.is_waiting_for_response && !futures.is_empty() { + Either::Right(futures::future::pending::<()>()) + } else { + Either::Left(tokio::time::sleep_until(next.into())) + } + } else { + Either::Right(futures::future::pending::<()>()) + }; + // If FuturesUnordered is empty, it will immediately yield `None`. We don't want that, + // so if it is empty we return the non-terminating future. + let next_publish_fut = if futures.is_empty() { + Either::Left(futures::future::pending()) + } else { + Either::Right(futures.next()) + }; + + tokio::select! { + // Both internal ticks and external triggers result in publish requests. 
+ v = recv.wait_for(|i| i > &slf.last_external_trigger) => { + if let Ok(v) = v { + debug!("Sending publish due to external trigger"); + // On an external trigger, we always publish. + futures.push(slf.static_publish()); + next = slf.session.next_publish_time(true); + slf.last_external_trigger = v.clone(); + } + } + _ = next_tick_fut => { + // Avoid publishing if there are too many inflight publish requests. + if futures.len() < slf.max_inflight_publish { + debug!("Sending publish due to internal tick"); + futures.push(slf.static_publish()); + } + next = slf.session.next_publish_time(true); + } + res = next_publish_fut => { + match res { + Some(Ok(should_publish_now)) => { + if should_publish_now { + futures.push(slf.static_publish()); + // Set the last publish time. + // We do this to avoid a buildup of publish requests + // if exhausting the queue takes more time than + // a single publishing interval. + slf.session.next_publish_time(true); + } + slf.is_waiting_for_response = false; + + break SubscriptionActivity::Publish + } + Some(Err(e)) => { + match e { + StatusCode::BadTimeout => { + session_debug!(slf.session, "Publish request timed out, sending another"); + if futures.len() < slf.max_inflight_publish { + futures.push(slf.static_publish()); + } + } + StatusCode::BadTooManyPublishRequests => { + session_debug!(slf.session, "Server returned BadTooManyPublishRequests, backing off"); + slf.is_waiting_for_response = true; + } + StatusCode::BadSessionClosed + | StatusCode::BadSessionIdInvalid => { + // TODO: Do something here? + session_error!(slf.session, "Publish response indicates session is dead"); + } + StatusCode::BadNoSubscription + | StatusCode::BadSubscriptionIdInvalid => { + // TODO: Maybe do something here? This could happen when subscriptions are + // in the process of being recreated. Make sure to avoid race conditions. 
+ session_error!(slf.session, "Publish response indicates subscription is dead"); + } + _ => () + } + break SubscriptionActivity::PublishFailed(e) + } + // Should be impossible + None => break SubscriptionActivity::PublishFailed(StatusCode::BadInvalidState) + } + } + } + }; + + Some((res, (slf, futures))) + }, + ) + } + + fn static_publish(&self) -> impl Future> + 'static { + let inner_session = self.session.clone(); + async move { inner_session.publish().await } + } +} diff --git a/lib/src/client/session/services/subscriptions/mod.rs b/lib/src/client/session/services/subscriptions/mod.rs new file mode 100644 index 000000000..7eb16dce8 --- /dev/null +++ b/lib/src/client/session/services/subscriptions/mod.rs @@ -0,0 +1,451 @@ +pub mod event_loop; +mod service; +pub mod state; + +use std::{ + collections::{BTreeSet, HashMap}, + time::Duration, +}; + +use crate::types::{ + DataChangeNotification, DataValue, DecodingOptions, EventNotificationList, ExtensionObject, + Identifier, MonitoringMode, NotificationMessage, ObjectId, ReadValueId, + StatusChangeNotification, Variant, +}; + +pub(crate) struct CreateMonitoredItem { + pub id: u32, + pub client_handle: u32, + pub item_to_monitor: ReadValueId, + pub monitoring_mode: MonitoringMode, + pub queue_size: u32, + pub discard_oldest: bool, + pub sampling_interval: f64, + pub filter: ExtensionObject, +} + +pub(crate) struct ModifyMonitoredItem { + pub id: u32, + pub sampling_interval: f64, + pub queue_size: u32, +} + +/// A set of callbacks for notifications on a subscription. +/// You may implement this on your own struct, or simply use [SubscriptionCallbacks] +/// for a simple collection of closures. +pub trait OnSubscriptionNotification: Send + Sync { + /// Called when a subscription changes state on the server. + fn on_subscription_status_change(&mut self, _notification: StatusChangeNotification) {} + + /// Called for each data value change. 
+ fn on_data_value(&mut self, _notification: DataValue, _item: &MonitoredItem) {} + + /// Called for each received event. + fn on_event(&mut self, _event_fields: Option>, _item: &MonitoredItem) {} +} + +/// A convenient wrapper around a set of callback functions that implements [OnSubscriptionNotification] +pub struct SubscriptionCallbacks { + status_change: Box, + data_value: Box, + event: Box>, &MonitoredItem) + Send + Sync>, +} + +impl SubscriptionCallbacks { + /// Create a new subscription callback wrapper. + /// + /// # Arguments + /// + /// * `status_change` - Called when a subscription changes state on the server. + /// * `data_value` - Called for each received data value. + /// * `event` - Called for each received event. + pub fn new( + status_change: impl FnMut(StatusChangeNotification) + Send + Sync + 'static, + data_value: impl FnMut(DataValue, &MonitoredItem) + Send + Sync + 'static, + event: impl FnMut(Option>, &MonitoredItem) + Send + Sync + 'static, + ) -> Self { + Self { + status_change: Box::new(status_change) + as Box, + data_value: Box::new(data_value) + as Box, + event: Box::new(event) + as Box>, &MonitoredItem) + Send + Sync>, + } + } +} + +impl OnSubscriptionNotification for SubscriptionCallbacks { + fn on_subscription_status_change(&mut self, notification: StatusChangeNotification) { + (&mut self.status_change)(notification); + } + + fn on_data_value(&mut self, notification: DataValue, item: &MonitoredItem) { + (&mut self.data_value)(notification, item); + } + + fn on_event(&mut self, event_fields: Option>, item: &MonitoredItem) { + (&mut self.event)(event_fields, item); + } +} + +/// A wrapper around a data change callback that implements [OnSubscriptionNotification] +pub struct DataChangeCallback { + data_value: Box, +} + +impl DataChangeCallback { + /// Create a new data change callback wrapper. + /// + /// # Arguments + /// + /// * `data_value` - Called for each received data value. 
+ pub fn new(data_value: impl FnMut(DataValue, &MonitoredItem) + Send + Sync + 'static) -> Self { + Self { + data_value: Box::new(data_value) + as Box, + } + } +} + +impl OnSubscriptionNotification for DataChangeCallback { + fn on_data_value(&mut self, notification: DataValue, item: &MonitoredItem) { + (&mut self.data_value)(notification, item); + } +} + +/// A wrapper around an event callback that implements [OnSubscriptionNotification] +pub struct EventCallback { + event: Box>, &MonitoredItem) + Send + Sync>, +} + +impl EventCallback { + /// Create a new event callback wrapper. + /// + /// # Arguments + /// + /// * `data_value` - Called for each received data value. + pub fn new( + event: impl FnMut(Option>, &MonitoredItem) + Send + Sync + 'static, + ) -> Self { + Self { + event: Box::new(event) + as Box>, &MonitoredItem) + Send + Sync>, + } + } +} + +impl OnSubscriptionNotification for EventCallback { + fn on_event(&mut self, event_fields: Option>, item: &MonitoredItem) { + (&mut self.event)(event_fields, item); + } +} + +pub struct MonitoredItem { + /// This is the monitored item's id within the subscription + id: u32, + /// Monitored item's handle. Used internally - not modifiable + client_handle: u32, + // The thing that is actually being monitored - the node id, attribute, index, encoding. 
+ item_to_monitor: ReadValueId, + /// Queue size + queue_size: usize, + /// Monitoring mode + monitoring_mode: MonitoringMode, + /// Sampling interval + sampling_interval: f64, + /// Triggered items + triggered_items: BTreeSet, + /// Whether to discard oldest values on queue overflow + discard_oldest: bool, + /// Active filter + filter: ExtensionObject, +} + +impl MonitoredItem { + pub fn new(client_handle: u32) -> MonitoredItem { + MonitoredItem { + id: 0, + client_handle, + item_to_monitor: ReadValueId::default(), + queue_size: 1, + monitoring_mode: MonitoringMode::Reporting, + sampling_interval: 0.0, + triggered_items: BTreeSet::new(), + discard_oldest: true, + filter: ExtensionObject::null(), + } + } + + /// Server assigned ID of the monitored item. + pub fn id(&self) -> u32 { + self.id + } + + /// Client assigned handle for the monitored item. + pub fn client_handle(&self) -> u32 { + self.client_handle + } + + /// Attribute and node ID for the item the monitored item receives notifications for. + pub fn item_to_monitor(&self) -> &ReadValueId { + &self.item_to_monitor + } + + /// Sampling interval. + pub fn sampling_interval(&self) -> f64 { + self.sampling_interval + } + + /// Queue size on the server. + pub fn queue_size(&self) -> usize { + self.queue_size + } + + /// Whether the oldest values are discarded on queue overflow on the server. 
+ pub fn discard_oldest(&self) -> bool { + self.discard_oldest + } + + pub(crate) fn set_sampling_interval(&mut self, value: f64) { + self.sampling_interval = value; + } + + pub(crate) fn set_queue_size(&mut self, value: usize) { + self.queue_size = value; + } + + pub(crate) fn set_monitoring_mode(&mut self, monitoring_mode: MonitoringMode) { + self.monitoring_mode = monitoring_mode; + } + + pub(crate) fn set_triggering(&mut self, links_to_add: &[u32], links_to_remove: &[u32]) { + links_to_remove.iter().for_each(|i| { + self.triggered_items.remove(i); + }); + links_to_add.iter().for_each(|i| { + self.triggered_items.insert(*i); + }); + } + + pub(crate) fn triggered_items(&self) -> &BTreeSet { + &self.triggered_items + } +} + +pub struct Subscription { + /// Subscription id, supplied by server + subscription_id: u32, + /// Publishing interval in seconds + publishing_interval: Duration, + /// Lifetime count, revised by server + lifetime_count: u32, + /// Max keep alive count, revised by server + max_keep_alive_count: u32, + /// Max notifications per publish, revised by server + max_notifications_per_publish: u32, + /// Publishing enabled + publishing_enabled: bool, + /// Subscription priority + priority: u8, + + /// A map of monitored items associated with the subscription (key = monitored_item_id) + monitored_items: HashMap, + /// A map of client handle to monitored item id + client_handles: HashMap, + + callback: Box, +} + +impl Subscription { + /// Creates a new subscription using the supplied parameters and the supplied data change callback. 
+ pub fn new( + subscription_id: u32, + publishing_interval: Duration, + lifetime_count: u32, + max_keep_alive_count: u32, + max_notifications_per_publish: u32, + priority: u8, + publishing_enabled: bool, + status_change_callback: Box, + ) -> Subscription { + Subscription { + subscription_id, + publishing_interval, + lifetime_count, + max_keep_alive_count, + max_notifications_per_publish, + publishing_enabled, + priority, + monitored_items: HashMap::new(), + client_handles: HashMap::new(), + callback: status_change_callback, + } + } + + pub fn monitored_items(&self) -> &HashMap { + &self.monitored_items + } + + pub fn subscription_id(&self) -> u32 { + self.subscription_id + } + + pub fn publishing_interval(&self) -> Duration { + self.publishing_interval + } + + pub fn lifetime_count(&self) -> u32 { + self.lifetime_count + } + + pub fn priority(&self) -> u8 { + self.priority + } + + pub fn max_keep_alive_count(&self) -> u32 { + self.max_keep_alive_count + } + + pub fn max_notifications_per_publish(&self) -> u32 { + self.max_notifications_per_publish + } + + pub fn publishing_enabled(&self) -> bool { + self.publishing_enabled + } + + pub(crate) fn set_publishing_interval(&mut self, publishing_interval: Duration) { + self.publishing_interval = publishing_interval; + } + + pub(crate) fn set_lifetime_count(&mut self, lifetime_count: u32) { + self.lifetime_count = lifetime_count; + } + + pub(crate) fn set_max_keep_alive_count(&mut self, max_keep_alive_count: u32) { + self.max_keep_alive_count = max_keep_alive_count; + } + + pub(crate) fn set_max_notifications_per_publish(&mut self, max_notifications_per_publish: u32) { + self.max_notifications_per_publish = max_notifications_per_publish; + } + + pub(crate) fn set_publishing_enabled(&mut self, publishing_enabled: bool) { + self.publishing_enabled = publishing_enabled; + } + + pub(crate) fn set_priority(&mut self, priority: u8) { + self.priority = priority; + } + + pub(crate) fn insert_monitored_items(&mut self, 
items_to_create: Vec) { + items_to_create.into_iter().for_each(|i| { + let monitored_item = MonitoredItem { + id: i.id, + client_handle: i.client_handle, + item_to_monitor: i.item_to_monitor, + queue_size: i.queue_size as usize, + monitoring_mode: i.monitoring_mode, + sampling_interval: i.sampling_interval, + triggered_items: BTreeSet::new(), + discard_oldest: i.discard_oldest, + filter: i.filter, + }; + + let client_handle = monitored_item.client_handle(); + let monitored_item_id = monitored_item.id(); + self.monitored_items + .insert(monitored_item_id, monitored_item); + self.client_handles.insert(client_handle, monitored_item_id); + }); + } + + pub(crate) fn modify_monitored_items(&mut self, items_to_modify: &[ModifyMonitoredItem]) { + items_to_modify.iter().for_each(|i| { + if let Some(ref mut monitored_item) = self.monitored_items.get_mut(&i.id) { + monitored_item.set_sampling_interval(i.sampling_interval); + monitored_item.set_queue_size(i.queue_size as usize); + } + }); + } + + pub(crate) fn delete_monitored_items(&mut self, items_to_delete: &[u32]) { + items_to_delete.iter().for_each(|id| { + // Remove the monitored item and the client handle / id entry + if let Some(monitored_item) = self.monitored_items.remove(id) { + let _ = self.client_handles.remove(&monitored_item.client_handle()); + } + }) + } + + pub(crate) fn set_triggering( + &mut self, + triggering_item_id: u32, + links_to_add: &[u32], + links_to_remove: &[u32], + ) { + if let Some(ref mut monitored_item) = self.monitored_items.get_mut(&triggering_item_id) { + monitored_item.set_triggering(links_to_add, links_to_remove); + } + } + + pub(crate) fn on_notification( + &mut self, + notification: NotificationMessage, + decoding_options: &DecodingOptions, + ) { + let Some(notifications) = notification.notification_data else { + return; + }; + + for obj in notifications { + if obj.node_id.namespace != 0 { + continue; + } + + let Identifier::Numeric(id) = obj.node_id.identifier else { + continue; + }; + 
+ if id == ObjectId::DataChangeNotification_Encoding_DefaultBinary as u32 { + match obj.decode_inner::(decoding_options) { + Ok(it) => { + for notif in it.monitored_items.into_iter().flatten() { + let item = self + .client_handles + .get(¬if.client_handle) + .and_then(|handle| self.monitored_items.get(handle)); + + if let Some(item) = item { + self.callback.on_data_value(notif.value, &item); + } + } + } + Err(e) => warn!("Failed to decode data change notification: {e}"), + } + } else if id == ObjectId::EventNotificationList_Encoding_DefaultBinary as u32 { + match obj.decode_inner::(decoding_options) { + Ok(it) => { + for notif in it.events.into_iter().flatten() { + let item = self + .client_handles + .get(¬if.client_handle) + .and_then(|handle| self.monitored_items.get(handle)); + + if let Some(item) = item { + self.callback.on_event(notif.event_fields, &item); + } + } + } + Err(e) => warn!("Failed to decode event notification: {e}"), + } + } else if id == ObjectId::StatusChangeNotification_Encoding_DefaultBinary as u32 { + match obj.decode_inner::(decoding_options) { + Ok(it) => self.callback.on_subscription_status_change(it), + Err(e) => warn!("Failed to decode status change notification: {e}"), + } + } + } + } +} diff --git a/lib/src/client/session/services/subscriptions/service.rs b/lib/src/client/session/services/subscriptions/service.rs new file mode 100644 index 000000000..141aae7f8 --- /dev/null +++ b/lib/src/client/session/services/subscriptions/service.rs @@ -0,0 +1,984 @@ +use std::{ + collections::HashSet, + time::{Duration, Instant}, +}; + +use crate::{ + client::{ + session::{ + process_service_result, process_unexpected_response, + services::subscriptions::{CreateMonitoredItem, ModifyMonitoredItem, Subscription}, + session_debug, session_error, session_trace, session_warn, + }, + Session, + }, + core::supported_message::SupportedMessage, + types::{ + CreateMonitoredItemsRequest, CreateSubscriptionRequest, DeleteMonitoredItemsRequest, + 
DeleteSubscriptionsRequest, ModifyMonitoredItemsRequest, ModifySubscriptionRequest, + MonitoredItemCreateRequest, MonitoredItemCreateResult, MonitoredItemModifyRequest, + MonitoredItemModifyResult, MonitoringMode, MonitoringParameters, PublishRequest, + SetMonitoringModeRequest, SetPublishingModeRequest, SetTriggeringRequest, StatusCode, + TimestampsToReturn, TransferResult, TransferSubscriptionsRequest, + }, +}; + +use super::OnSubscriptionNotification; + +impl Session { + async fn create_subscription_inner( + &self, + publishing_interval: Duration, + lifetime_count: u32, + max_keep_alive_count: u32, + max_notifications_per_publish: u32, + publishing_enabled: bool, + priority: u8, + callback: Box, + ) -> Result { + let request = CreateSubscriptionRequest { + request_header: self.make_request_header(), + requested_publishing_interval: publishing_interval.as_secs_f64(), + requested_lifetime_count: lifetime_count, + requested_max_keep_alive_count: max_keep_alive_count, + max_notifications_per_publish, + publishing_enabled, + priority, + }; + let response = self.send(request).await?; + if let SupportedMessage::CreateSubscriptionResponse(response) = response { + process_service_result(&response.response_header)?; + let subscription = Subscription::new( + response.subscription_id, + Duration::from_millis(response.revised_publishing_interval.max(0.0).floor() as u64), + response.revised_lifetime_count, + response.revised_max_keep_alive_count, + max_notifications_per_publish, + priority, + publishing_enabled, + callback, + ); + + // Add the new subscription to the subscription state + { + let mut subscription_state = trace_lock!(self.subscription_state); + subscription_state.add_subscription(subscription); + } + + // Send an async publish request for this new subscription + let _ = self.trigger_publish_tx.send(Instant::now()); + + session_debug!( + self, + "create_subscription, created a subscription with id {}", + response.subscription_id + ); + 
Ok(response.subscription_id) + } else { + session_error!(self, "create_subscription failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + + /// Create a subscription by sending a [`CreateSubscriptionRequest`] to the server. + /// + /// See OPC UA Part 4 - Services 5.13.2 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `publishing_interval` - The requested publishing interval defines the cyclic rate that + /// the Subscription is being requested to return Notifications to the Client. This interval + /// is expressed in milliseconds. This interval is represented by the publishing timer in the + /// Subscription state table. The negotiated value for this parameter returned in the + /// response is used as the default sampling interval for MonitoredItems assigned to this + /// Subscription. If the requested value is 0 or negative, the server shall revise with the + /// fastest supported publishing interval in milliseconds. + /// * `lifetime_count` - Requested lifetime count. The lifetime count shall be a minimum of + /// three times the keep keep-alive count. When the publishing timer has expired this + /// number of times without a Publish request being available to send a NotificationMessage, + /// then the Subscription shall be deleted by the Server. + /// * `max_keep_alive_count` - Requested maximum keep-alive count. When the publishing timer has + /// expired this number of times without requiring any NotificationMessage to be sent, the + /// Subscription sends a keep-alive Message to the Client. The negotiated value for this + /// parameter is returned in the response. If the requested value is 0, the server shall + /// revise with the smallest supported keep-alive count. + /// * `max_notifications_per_publish` - The maximum number of notifications that the Client + /// wishes to receive in a single Publish response. A value of zero indicates that there is + /// no limit. 
The number of notifications per Publish is the sum of monitoredItems in + /// the DataChangeNotification and events in the EventNotificationList. + /// * `priority` - Indicates the relative priority of the Subscription. When more than one + /// Subscription needs to send Notifications, the Server should de-queue a Publish request + /// to the Subscription with the highest priority number. For Subscriptions with equal + /// priority the Server should de-queue Publish requests in a round-robin fashion. + /// A Client that does not require special priority settings should set this value to zero. + /// * `publishing_enabled` - A boolean parameter with the following values - `true` publishing + /// is enabled for the Subscription, `false`, publishing is disabled for the Subscription. + /// The value of this parameter does not affect the value of the monitoring mode Attribute of + /// MonitoredItems. + /// + /// # Returns + /// + /// * `Ok(u32)` - identifier for new subscription + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub async fn create_subscription( + &self, + publishing_interval: Duration, + lifetime_count: u32, + max_keep_alive_count: u32, + max_notifications_per_publish: u32, + priority: u8, + publishing_enabled: bool, + callback: impl OnSubscriptionNotification + Send + Sync + 'static, + ) -> Result { + self.create_subscription_inner( + publishing_interval, + lifetime_count, + max_keep_alive_count, + max_notifications_per_publish, + publishing_enabled, + priority, + Box::new(callback), + ) + .await + } + + fn subscription_exists(&self, subscription_id: u32) -> bool { + let subscription_state = trace_lock!(self.subscription_state); + subscription_state.subscription_exists(subscription_id) + } + + /// Modifies a subscription by sending a [`ModifySubscriptionRequest`] to the server. + /// + /// See OPC UA Part 4 - Services 5.13.3 for complete description of the service and error responses. 
+ /// + /// # Arguments + /// + /// * `subscription_id` - subscription identifier returned from `create_subscription`. + /// * `publishing_interval` - The requested publishing interval defines the cyclic rate that + /// the Subscription is being requested to return Notifications to the Client. This interval + /// is expressed in milliseconds. This interval is represented by the publishing timer in the + /// Subscription state table. The negotiated value for this parameter returned in the + /// response is used as the default sampling interval for MonitoredItems assigned to this + /// Subscription. If the requested value is 0 or negative, the server shall revise with the + /// fastest supported publishing interval in milliseconds. + /// * `lifetime_count` - Requested lifetime count. The lifetime count shall be a minimum of + /// three times the keep-alive count. When the publishing timer has expired this + /// number of times without a Publish request being available to send a NotificationMessage, + /// then the Subscription shall be deleted by the Server. + /// * `max_keep_alive_count` - Requested maximum keep-alive count. When the publishing timer has + /// expired this number of times without requiring any NotificationMessage to be sent, the + /// Subscription sends a keep-alive Message to the Client. The negotiated value for this + /// parameter is returned in the response. If the requested value is 0, the server shall + /// revise with the smallest supported keep-alive count. + /// * `max_notifications_per_publish` - The maximum number of notifications that the Client + /// wishes to receive in a single Publish response. A value of zero indicates that there is + /// no limit. The number of notifications per Publish is the sum of monitoredItems in + /// the DataChangeNotification and events in the EventNotificationList. + /// * `priority` - Indicates the relative priority of the Subscription. 
When more than one + /// Subscription needs to send Notifications, the Server should de-queue a Publish request + /// to the Subscription with the highest priority number. For Subscriptions with equal + /// priority the Server should de-queue Publish requests in a round-robin fashion. + /// A Client that does not require special priority settings should set this value to zero. + /// + /// # Returns + /// + /// * `Ok(())` - Success + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub async fn modify_subscription( + &self, + subscription_id: u32, + publishing_interval: f64, + lifetime_count: u32, + max_keep_alive_count: u32, + max_notifications_per_publish: u32, + priority: u8, + ) -> Result<(), StatusCode> { + if subscription_id == 0 { + session_error!(self, "modify_subscription, subscription id must be non-zero, or the subscription is considered invalid"); + Err(StatusCode::BadInvalidArgument) + } else if !self.subscription_exists(subscription_id) { + session_error!(self, "modify_subscription, subscription id does not exist"); + Err(StatusCode::BadInvalidArgument) + } else { + let request = ModifySubscriptionRequest { + request_header: self.make_request_header(), + subscription_id, + requested_publishing_interval: publishing_interval, + requested_lifetime_count: lifetime_count, + requested_max_keep_alive_count: max_keep_alive_count, + max_notifications_per_publish, + priority, + }; + let response = self.send(request).await?; + if let SupportedMessage::ModifySubscriptionResponse(response) = response { + process_service_result(&response.response_header)?; + let mut subscription_state = trace_lock!(self.subscription_state); + subscription_state.modify_subscription( + subscription_id, + Duration::from_millis( + response.revised_publishing_interval.max(0.0).floor() as u64 + ), + response.revised_lifetime_count, + response.revised_max_keep_alive_count, + max_notifications_per_publish, + priority, + ); + 
session_debug!(self, "modify_subscription success for {}", subscription_id); + Ok(()) + } else { + session_error!(self, "modify_subscription failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + } + + /// Changes the publishing mode of subscriptions by sending a [`SetPublishingModeRequest`] to the server. + /// + /// See OPC UA Part 4 - Services 5.13.4 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `subscription_ids` - one or more subscription identifiers. + /// * `publishing_enabled` - A boolean parameter with the following values - `true` publishing + /// is enabled for the Subscriptions, `false`, publishing is disabled for the Subscriptions. + /// + /// # Returns + /// + /// * `Ok(Vec)` - Service return code for the action for each id, `Good` or `BadSubscriptionIdInvalid` + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub async fn set_publishing_mode( + &self, + subscription_ids: &[u32], + publishing_enabled: bool, + ) -> Result, StatusCode> { + session_debug!( + self, + "set_publishing_mode, for subscriptions {:?}, publishing enabled {}", + subscription_ids, + publishing_enabled + ); + if subscription_ids.is_empty() { + // No subscriptions + session_error!( + self, + "set_publishing_mode, no subscription ids were provided" + ); + Err(StatusCode::BadNothingToDo) + } else { + let request = SetPublishingModeRequest { + request_header: self.make_request_header(), + publishing_enabled, + subscription_ids: Some(subscription_ids.to_vec()), + }; + let response = self.send(request).await?; + if let SupportedMessage::SetPublishingModeResponse(response) = response { + process_service_result(&response.response_header)?; + { + // Clear out all subscriptions, assuming the delete worked + let mut subscription_state = trace_lock!(self.subscription_state); + subscription_state.set_publishing_mode(subscription_ids, publishing_enabled); + } + 
session_debug!(self, "set_publishing_mode success"); + Ok(response.results.unwrap()) + } else { + session_error!(self, "set_publishing_mode failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + } + + /// Transfers Subscriptions and their MonitoredItems from one Session to another. For example, + /// a Client may need to reopen a Session and then transfer its Subscriptions to that Session. + /// It may also be used by one Client to take over a Subscription from another Client by + /// transferring the Subscription to its Session. + /// + /// See OPC UA Part 4 - Services 5.13.7 for complete description of the service and error responses. + /// + /// * `subscription_ids` - one or more subscription identifiers. + /// * `send_initial_values` - A boolean parameter with the following values - `true` the first + /// publish response shall contain the current values of all monitored items in the subscription, + /// `false`, the first publish response shall contain only the value changes since the last + /// publish response was sent. + /// + /// # Returns + /// + /// * `Ok(Vec)` - The [`TransferResult`] for each transfer subscription. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub async fn transfer_subscriptions( + &self, + subscription_ids: &[u32], + send_initial_values: bool, + ) -> Result, StatusCode> { + if subscription_ids.is_empty() { + // No subscriptions + session_error!( + self, + "set_publishing_mode, no subscription ids were provided" + ); + Err(StatusCode::BadNothingToDo) + } else { + let request = TransferSubscriptionsRequest { + request_header: self.make_request_header(), + subscription_ids: Some(subscription_ids.to_vec()), + send_initial_values, + }; + let response = self.send(request).await?; + if let SupportedMessage::TransferSubscriptionsResponse(response) = response { + process_service_result(&response.response_header)?; + session_debug!(self, "transfer_subscriptions success"); + Ok(response.results.unwrap()) + } else { + session_error!(self, "transfer_subscriptions failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + } + + /// Deletes a subscription by sending a [`DeleteSubscriptionsRequest`] to the server. + /// + /// See OPC UA Part 4 - Services 5.13.8 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `subscription_id` - subscription identifier returned from `create_subscription`. + /// + /// # Returns + /// + /// * `Ok(StatusCode)` - Service return code for the delete action, `Good` or `BadSubscriptionIdInvalid` + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub async fn delete_subscription( + &self, + subscription_id: u32, + ) -> Result { + if subscription_id == 0 { + session_error!(self, "delete_subscription, subscription id 0 is invalid"); + Err(StatusCode::BadInvalidArgument) + } else if !self.subscription_exists(subscription_id) { + session_error!( + self, + "delete_subscription, subscription id {} does not exist", + subscription_id + ); + Err(StatusCode::BadInvalidArgument) + } else { + let result = self.delete_subscriptions(&[subscription_id][..]).await?; + Ok(result[0]) + } + } + + /// Deletes subscriptions by sending a [`DeleteSubscriptionsRequest`] to the server with the list + /// of subscriptions to delete. + /// + /// See OPC UA Part 4 - Services 5.13.8 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `subscription_ids` - List of subscription identifiers to delete. + /// + /// # Returns + /// + /// * `Ok(Vec)` - List of result for delete action on each id, `Good` or `BadSubscriptionIdInvalid` + /// The size and order of the list matches the size and order of the input. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub async fn delete_subscriptions( + &self, + subscription_ids: &[u32], + ) -> Result, StatusCode> { + if subscription_ids.is_empty() { + // No subscriptions + session_trace!(self, "delete_subscriptions with no subscriptions"); + Err(StatusCode::BadNothingToDo) + } else { + // Send a delete request holding all the subscription ids that we wish to delete + let request = DeleteSubscriptionsRequest { + request_header: self.make_request_header(), + subscription_ids: Some(subscription_ids.to_vec()), + }; + let response = self.send(request).await?; + if let SupportedMessage::DeleteSubscriptionsResponse(response) = response { + process_service_result(&response.response_header)?; + { + // Clear out deleted subscriptions, assuming the delete worked + let mut subscription_state = trace_lock!(self.subscription_state); + subscription_ids.iter().for_each(|id| { + let _ = subscription_state.delete_subscription(*id); + }); + } + session_debug!(self, "delete_subscriptions success"); + Ok(response.results.unwrap()) + } else { + session_error!(self, "delete_subscriptions failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + } + + /// Creates monitored items on a subscription by sending a [`CreateMonitoredItemsRequest`] to the server. + /// + /// See OPC UA Part 4 - Services 5.12.2 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `subscription_id` - The Server-assigned identifier for the Subscription that will report Notifications for this MonitoredItem + /// * `timestamps_to_return` - An enumeration that specifies the timestamp Attributes to be transmitted for each MonitoredItem. + /// * `items_to_create` - A list of [`MonitoredItemCreateRequest`] to be created and assigned to the specified Subscription. + /// + /// # Returns + /// + /// * `Ok(Vec)` - A list of [`MonitoredItemCreateResult`] corresponding to the items to create. 
+ /// The size and order of the list matches the size and order of the `items_to_create` request parameter. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub async fn create_monitored_items( + &self, + subscription_id: u32, + timestamps_to_return: TimestampsToReturn, + items_to_create: Vec, + ) -> Result, StatusCode> { + session_debug!( + self, + "create_monitored_items, for subscription {}, {} items", + subscription_id, + items_to_create.len() + ); + if subscription_id == 0 { + session_error!(self, "create_monitored_items, subscription id 0 is invalid"); + Err(StatusCode::BadInvalidArgument) + } else if !self.subscription_exists(subscription_id) { + session_error!( + self, + "create_monitored_items, subscription id {} does not exist", + subscription_id + ); + Err(StatusCode::BadInvalidArgument) + } else if items_to_create.is_empty() { + session_error!( + self, + "create_monitored_items, called with no items to create" + ); + Err(StatusCode::BadNothingToDo) + } else { + let mut final_items_to_create = Vec::new(); + let mut created_items = Vec::new(); + + for mut req in items_to_create { + if req.requested_parameters.client_handle == 0 { + req.requested_parameters.client_handle = self.monitored_item_handle.next(); + } + + final_items_to_create.push(req.clone()); + created_items.push(req); + } + + let request = CreateMonitoredItemsRequest { + request_header: self.make_request_header(), + subscription_id, + timestamps_to_return, + items_to_create: Some(final_items_to_create.clone()), + }; + let response = self.send(request).await?; + + if let SupportedMessage::CreateMonitoredItemsResponse(response) = response { + process_service_result(&response.response_header)?; + if let Some(ref results) = response.results { + session_debug!( + self, + "create_monitored_items, {} items created", + created_items.len() + ); + // Set the items in our internal state + let items_to_create = created_items + .into_iter() + 
.zip(results) + .map(|(i, r)| CreateMonitoredItem { + id: r.monitored_item_id, + client_handle: i.requested_parameters.client_handle, + discard_oldest: i.requested_parameters.discard_oldest, + item_to_monitor: i.item_to_monitor.clone(), + monitoring_mode: i.monitoring_mode, + queue_size: r.revised_queue_size, + sampling_interval: r.revised_sampling_interval, + filter: i.requested_parameters.filter, + }) + .collect::>(); + { + let mut subscription_state = trace_lock!(self.subscription_state); + subscription_state.insert_monitored_items(subscription_id, items_to_create); + } + } else { + session_debug!( + self, + "create_monitored_items, success but no monitored items were created" + ); + } + Ok(response.results.unwrap()) + } else { + session_error!(self, "create_monitored_items failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + } + + /// Modifies monitored items on a subscription by sending a [`ModifyMonitoredItemsRequest`] to the server. + /// + /// See OPC UA Part 4 - Services 5.12.3 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `subscription_id` - The Server-assigned identifier for the Subscription that will report Notifications for this MonitoredItem. + /// * `timestamps_to_return` - An enumeration that specifies the timestamp Attributes to be transmitted for each MonitoredItem. + /// * `items_to_modify` - The list of [`MonitoredItemModifyRequest`] to modify. + /// + /// # Returns + /// + /// * `Ok(Vec)` - A list of [`MonitoredItemModifyResult`] corresponding to the MonitoredItems to modify. + /// The size and order of the list matches the size and order of the `items_to_modify` request parameter. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub async fn modify_monitored_items( + &self, + subscription_id: u32, + timestamps_to_return: TimestampsToReturn, + items_to_modify: &[MonitoredItemModifyRequest], + ) -> Result, StatusCode> { + session_debug!( + self, + "modify_monitored_items, for subscription {}, {} items", + subscription_id, + items_to_modify.len() + ); + if subscription_id == 0 { + session_error!(self, "modify_monitored_items, subscription id 0 is invalid"); + Err(StatusCode::BadInvalidArgument) + } else if !self.subscription_exists(subscription_id) { + session_error!( + self, + "modify_monitored_items, subscription id {} does not exist", + subscription_id + ); + Err(StatusCode::BadInvalidArgument) + } else if items_to_modify.is_empty() { + session_error!( + self, + "modify_monitored_items, called with no items to modify" + ); + Err(StatusCode::BadNothingToDo) + } else { + let monitored_item_ids = items_to_modify + .iter() + .map(|i| i.monitored_item_id) + .collect::>(); + let request = ModifyMonitoredItemsRequest { + request_header: self.make_request_header(), + subscription_id, + timestamps_to_return, + items_to_modify: Some(items_to_modify.to_vec()), + }; + let response = self.send(request).await?; + if let SupportedMessage::ModifyMonitoredItemsResponse(response) = response { + process_service_result(&response.response_header)?; + if let Some(ref results) = response.results { + // Set the items in our internal state + let items_to_modify = monitored_item_ids + .iter() + .zip(results.iter()) + .map(|(id, r)| ModifyMonitoredItem { + id: *id, + queue_size: r.revised_queue_size, + sampling_interval: r.revised_sampling_interval, + }) + .collect::>(); + { + let mut subscription_state = trace_lock!(self.subscription_state); + subscription_state + .modify_monitored_items(subscription_id, &items_to_modify); + } + } + session_debug!(self, "modify_monitored_items, success"); + Ok(response.results.unwrap()) + } else { + session_error!(self, "modify_monitored_items failed {:?}", response); + 
Err(process_unexpected_response(response)) + } + } + } + + /// Sets the monitoring mode on one or more monitored items by sending a [`SetMonitoringModeRequest`] + /// to the server. + /// + /// See OPC UA Part 4 - Services 5.12.4 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `subscription_id` - the subscription identifier containing the monitored items to be modified. + /// * `monitoring_mode` - the monitored mode to apply to the monitored items + /// * `monitored_item_ids` - the monitored items to be modified + /// + /// # Returns + /// + /// * `Ok(Vec)` - Individual result for each monitored item. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub async fn set_monitoring_mode( + &self, + subscription_id: u32, + monitoring_mode: MonitoringMode, + monitored_item_ids: &[u32], + ) -> Result, StatusCode> { + if monitored_item_ids.is_empty() { + session_error!(self, "set_monitoring_mode, called with nothing to do"); + Err(StatusCode::BadNothingToDo) + } else { + let request = { + let monitored_item_ids = Some(monitored_item_ids.to_vec()); + SetMonitoringModeRequest { + request_header: self.make_request_header(), + subscription_id, + monitoring_mode, + monitored_item_ids, + } + }; + let response = self.send(request).await?; + + { + let mut subscription_state = trace_lock!(self.subscription_state); + subscription_state.set_monitoring_mode( + subscription_id, + monitored_item_ids, + monitoring_mode, + ); + } + if let SupportedMessage::SetMonitoringModeResponse(response) = response { + Ok(response.results.unwrap()) + } else { + session_error!(self, "set_monitoring_mode failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + } + + /// Sets a monitored item so it becomes the trigger that causes other monitored items to send + /// change events in the same update. Sends a [`SetTriggeringRequest`] to the server. 
+ /// Note that `items_to_remove` is applied before `items_to_add`. + /// + /// See OPC UA Part 4 - Services 5.12.5 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `subscription_id` - the subscription identifier containing the monitored item to be used as the trigger. + /// * `monitored_item_id` - the monitored item that is the trigger. + /// * `links_to_add` - zero or more items to be added to the monitored item's triggering list. + /// * `items_to_remove` - zero or more items to be removed from the monitored item's triggering list. + /// + /// # Returns + /// + /// * `Ok((Option>, Option>))` - Individual result for each item added / removed for the SetTriggering call. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub async fn set_triggering( + &self, + subscription_id: u32, + triggering_item_id: u32, + links_to_add: &[u32], + links_to_remove: &[u32], + ) -> Result<(Option>, Option>), StatusCode> { + if links_to_add.is_empty() && links_to_remove.is_empty() { + session_error!(self, "set_triggering, called with nothing to add or remove"); + Err(StatusCode::BadNothingToDo) + } else { + let request = { + let links_to_add = if links_to_add.is_empty() { + None + } else { + Some(links_to_add.to_vec()) + }; + let links_to_remove = if links_to_remove.is_empty() { + None + } else { + Some(links_to_remove.to_vec()) + }; + SetTriggeringRequest { + request_header: self.make_request_header(), + subscription_id, + triggering_item_id, + links_to_add, + links_to_remove, + } + }; + let response = self.send(request).await?; + if let SupportedMessage::SetTriggeringResponse(response) = response { + // Update client side state + let mut subscription_state = trace_lock!(self.subscription_state); + subscription_state.set_triggering( + subscription_id, + triggering_item_id, + links_to_add, + links_to_remove, + ); + Ok((response.add_results, response.remove_results)) + } else { + 
session_error!(self, "set_triggering failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + } + + /// Deletes monitored items from a subscription by sending a [`DeleteMonitoredItemsRequest`] to the server. + /// + /// See OPC UA Part 4 - Services 5.12.6 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `subscription_id` - The Server-assigned identifier for the Subscription that will report Notifications for this MonitoredItem. + /// * `items_to_delete` - List of Server-assigned ids for the MonitoredItems to be deleted. + /// + /// # Returns + /// + /// * `Ok(Vec)` - List of StatusCodes for the MonitoredItems to delete. The size and + /// order of the list matches the size and order of the `items_to_delete` request parameter. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. + /// + pub async fn delete_monitored_items( + &self, + subscription_id: u32, + items_to_delete: &[u32], + ) -> Result, StatusCode> { + session_debug!( + self, + "delete_monitored_items, subscription {} for {} items", + subscription_id, + items_to_delete.len() + ); + if subscription_id == 0 { + session_error!(self, "delete_monitored_items, subscription id 0 is invalid"); + Err(StatusCode::BadInvalidArgument) + } else if !self.subscription_exists(subscription_id) { + session_error!( + self, + "delete_monitored_items, subscription id {} does not exist", + subscription_id + ); + Err(StatusCode::BadInvalidArgument) + } else if items_to_delete.is_empty() { + session_error!( + self, + "delete_monitored_items, called with no items to delete" + ); + Err(StatusCode::BadNothingToDo) + } else { + let request = DeleteMonitoredItemsRequest { + request_header: self.make_request_header(), + subscription_id, + monitored_item_ids: Some(items_to_delete.to_vec()), + }; + let response = self.send(request).await?; + if let SupportedMessage::DeleteMonitoredItemsResponse(response) = response { + 
process_service_result(&response.response_header)?; + if response.results.is_some() { + let mut subscription_state = trace_lock!(self.subscription_state); + subscription_state.delete_monitored_items(subscription_id, items_to_delete); + } + session_debug!(self, "delete_monitored_items, success"); + Ok(response.results.unwrap()) + } else { + session_error!(self, "delete_monitored_items failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + } + + pub(crate) fn next_publish_time(&self, set_last_publish: bool) -> Option { + let mut subscription_state = trace_lock!(self.subscription_state); + if set_last_publish { + subscription_state.set_last_publish(); + } + subscription_state.next_publish_time() + } + + /// Send a publish request, returning `true` if the session should send a new request + /// immediately. + pub(crate) async fn publish(&self) -> Result { + let acks = { + let mut subscription_state = trace_lock!(self.subscription_state); + let acks = subscription_state.take_acknowledgements(); + if acks.len() > 0 { + Some(acks) + } else { + None + } + }; + + if log_enabled!(log::Level::Debug) { + let sequence_nrs: Vec = acks + .iter() + .flatten() + .map(|ack| ack.sequence_number) + .collect(); + debug!( + "publish is acknowledging subscription acknowledgements with sequence nrs {:?}", + sequence_nrs + ); + } + + let request = PublishRequest { + request_header: self.channel.make_request_header(self.publish_timeout), + subscription_acknowledgements: acks.clone(), + }; + + let response = self.channel.send(request, self.publish_timeout).await; + + let err_status = match response { + Ok(SupportedMessage::PublishResponse(r)) => { + session_debug!(self, "PublishResponse"); + + let decoding_options = { + let secure_channel = trace_read_lock!(self.channel.secure_channel); + secure_channel.decoding_options() + }; + + { + let mut subscription_state = trace_lock!(self.subscription_state); + subscription_state.handle_notification( + r.subscription_id, + 
r.notification_message, + &decoding_options, + ); + } + + return Ok(r.more_notifications); + } + Err(e) => e, + Ok(r) => { + session_error!(self, "publish failed {:?}", r); + process_unexpected_response(r) + } + }; + + if let Some(acks) = acks { + let mut subscription_state = trace_lock!(self.subscription_state); + subscription_state.re_queue_acknowledgements(acks); + } + + Err(err_status) + } + + /// This code attempts to take the existing subscriptions created by a previous session and + /// either transfer them to this session, or construct them from scratch. + pub(crate) async fn transfer_subscriptions_from_old_session(&self) { + // TODO: This whole thing should probably be optional, so that users can + // customize the process. + let subscription_ids = { + let subscription_state = trace_lock!(self.subscription_state); + subscription_state.subscription_ids() + }; + + let Some(subscription_ids) = subscription_ids else { + return; + }; + + // Start by getting the subscription ids + // Try to use TransferSubscriptions to move subscriptions_ids over. If this + // works then there is nothing else to do. + let mut subscription_ids_to_recreate = + subscription_ids.iter().copied().collect::>(); + if let Ok(transfer_results) = self.transfer_subscriptions(&subscription_ids, true).await { + session_debug!(self, "transfer_results = {:?}", transfer_results); + transfer_results.iter().enumerate().for_each(|(i, r)| { + if r.status_code.is_good() { + // Subscription was transferred so it does not need to be recreated + subscription_ids_to_recreate.remove(&subscription_ids[i]); + } + }); + } + + // But if it didn't work, then some or all subscriptions have to be remade. 
+ if !subscription_ids_to_recreate.is_empty() { + session_warn!(self, "Some or all of the existing subscriptions could not be transferred and must be created manually"); + } + + for subscription_id in subscription_ids_to_recreate { + session_debug!(self, "Recreating subscription {}", subscription_id); + + let deleted_subscription = { + let mut subscription_state = trace_lock!(self.subscription_state); + subscription_state.delete_subscription(subscription_id) + }; + + let Some(subscription) = deleted_subscription else { + session_warn!( + self, + "Subscription removed from session while transfer in progress" + ); + continue; + }; + + let Ok(subscription_id) = self + .create_subscription_inner( + subscription.publishing_interval, + subscription.lifetime_count, + subscription.max_keep_alive_count, + subscription.max_notifications_per_publish, + subscription.publishing_enabled, + subscription.priority, + subscription.callback, + ) + .await + else { + session_warn!( + self, + "Could not create a subscription from the existing subscription {}", + subscription_id + ); + continue; + }; + + let items_to_create = subscription + .monitored_items + .iter() + .map(|(_, item)| MonitoredItemCreateRequest { + item_to_monitor: item.item_to_monitor().clone(), + monitoring_mode: item.monitoring_mode, + requested_parameters: MonitoringParameters { + client_handle: item.client_handle(), + sampling_interval: item.sampling_interval(), + filter: item.filter.clone(), + queue_size: item.queue_size() as u32, + discard_oldest: item.discard_oldest(), + }, + }) + .collect::>(); + + let mut iter = items_to_create.into_iter(); + + loop { + let chunk = (&mut iter) + .take(self.recreate_monitored_items_chunk) + .collect::>(); + + if chunk.is_empty() { + break; + } + + let _ = self + .create_monitored_items(subscription_id, TimestampsToReturn::Both, chunk) + .await; + } + + for item in subscription.monitored_items.values() { + let triggered_items = item.triggered_items(); + if 
!triggered_items.is_empty() { + let links_to_add = triggered_items.iter().copied().collect::>(); + let _ = self + .set_triggering(subscription_id, item.id(), links_to_add.as_slice(), &[]) + .await; + } + } + } + } +} diff --git a/lib/src/client/subscription_state.rs b/lib/src/client/session/services/subscriptions/state.rs similarity index 56% rename from lib/src/client/subscription_state.rs rename to lib/src/client/session/services/subscriptions/state.rs index 2b346577d..cf916ade0 100644 --- a/lib/src/client/subscription_state.rs +++ b/lib/src/client/session/services/subscriptions/state.rs @@ -1,34 +1,77 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock +use std::{ + collections::HashMap, + time::{Duration, Instant}, +}; -use std::collections::HashMap; +use crate::types::{ + DecodingOptions, MonitoringMode, NotificationMessage, SubscriptionAcknowledgement, +}; -use tokio::time::Instant; +use super::{CreateMonitoredItem, ModifyMonitoredItem, Subscription}; -use crate::types::service_types::{DataChangeNotification, EventNotificationList}; - -use super::subscription::*; - -/// Holds the live subscription state +/// State containing all known subscriptions in the session. pub struct SubscriptionState { - /// Subscripion keep alive timeout - keep_alive_timeout: Option, - /// Timestamp of last pushish request - last_publish_request: Instant, - /// Subscriptions (key = subscription_id) subscriptions: HashMap, + last_publish: Instant, + acknowledgements: Vec, + keep_alive_timeout: Option, + min_publish_interval: Duration, } impl SubscriptionState { - pub fn new() -> SubscriptionState { - SubscriptionState { - keep_alive_timeout: None, - last_publish_request: Instant::now(), + /// Create a new subscription state. + /// + /// # Arguments + /// + /// * `min_publishing_interval` - The minimum accepted publishing interval, any lower values + /// will be set to this. 
+ pub(crate) fn new(min_publish_interval: Duration) -> Self { + Self { subscriptions: HashMap::new(), + last_publish: Instant::now() - min_publish_interval, + acknowledgements: Vec::new(), + keep_alive_timeout: None, + min_publish_interval, } } + pub(crate) fn next_publish_time(&self) -> Option { + if self.subscriptions.is_empty() { + return None; + } + + let next = self + .subscriptions + .values() + .filter(|s| s.publishing_enabled()) + .map(|s| s.publishing_interval().max(self.min_publish_interval)) + .min() + .or_else(|| self.keep_alive_timeout) + .map(|e| self.last_publish + e); + + next + } + + pub(crate) fn set_last_publish(&mut self) { + self.last_publish = Instant::now(); + } + + pub(crate) fn take_acknowledgements(&mut self) -> Vec { + std::mem::take(&mut self.acknowledgements) + } + + fn add_acknowledgement(&mut self, subscription_id: u32, sequence_number: u32) { + self.acknowledgements.push(SubscriptionAcknowledgement { + subscription_id, + sequence_number, + }) + } + + pub(crate) fn re_queue_acknowledgements(&mut self, acks: Vec) { + self.acknowledgements.extend(acks.into_iter()); + } + + /// List of subscription IDs. pub fn subscription_ids(&self) -> Option> { if self.subscriptions.is_empty() { None @@ -37,10 +80,12 @@ impl SubscriptionState { } } + /// Check if the subscription ID is known. pub fn subscription_exists(&self, subscription_id: u32) -> bool { self.subscriptions.contains_key(&subscription_id) } + /// Get a reference to a subscription by ID. 
pub fn get(&self, subscription_id: u32) -> Option<&Subscription> { self.subscriptions.get(&subscription_id) } @@ -54,7 +99,7 @@ impl SubscriptionState { pub(crate) fn modify_subscription( &mut self, subscription_id: u32, - publishing_interval: f64, + publishing_interval: Duration, lifetime_count: u32, max_keep_alive_count: u32, max_notifications_per_publish: u32, @@ -88,26 +133,10 @@ impl SubscriptionState { }); } - pub(crate) fn on_data_change( - &mut self, - subscription_id: u32, - data_change_notifications: &[DataChangeNotification], - ) { - if let Some(ref mut subscription) = self.subscriptions.get_mut(&subscription_id) { - subscription.on_data_change(data_change_notifications); - } - } - - pub(crate) fn on_event(&mut self, subscription_id: u32, events: &[EventNotificationList]) { - if let Some(ref mut subscription) = self.subscriptions.get_mut(&subscription_id) { - subscription.on_event(events); - } - } - pub(crate) fn insert_monitored_items( &mut self, subscription_id: u32, - items_to_create: &[CreateMonitoredItem], + items_to_create: Vec, ) { if let Some(ref mut subscription) = self.subscriptions.get_mut(&subscription_id) { subscription.insert_monitored_items(items_to_create); @@ -142,23 +171,38 @@ impl SubscriptionState { } } - pub(crate) fn last_publish_request(&self) -> Instant { - self.last_publish_request - } - - pub(crate) fn set_last_publish_request(&mut self, now: Instant) { - self.last_publish_request = now; + pub(crate) fn set_monitoring_mode( + &mut self, + subscription_id: u32, + montiored_item_ids: &[u32], + monitoring_mode: MonitoringMode, + ) { + if let Some(ref mut subscription) = self.subscriptions.get_mut(&subscription_id) { + for id in montiored_item_ids { + if let Some(item) = subscription.monitored_items.get_mut(id) { + item.set_monitoring_mode(monitoring_mode); + } + } + } } - pub(crate) fn keep_alive_timeout(&self) -> Option { - self.keep_alive_timeout + pub(crate) fn handle_notification( + &mut self, + subscription_id: u32, + 
notification: NotificationMessage, + decoding_options: &DecodingOptions, + ) { + self.add_acknowledgement(subscription_id, notification.sequence_number); + if let Some(sub) = self.subscriptions.get_mut(&subscription_id) { + sub.on_notification(notification, decoding_options); + } } fn set_keep_alive_timeout(&mut self) { self.keep_alive_timeout = self .subscriptions .values() - .map(|v| (v.publishing_interval() * v.lifetime_count() as f64).floor() as u64) + .map(|v| v.publishing_interval() * v.lifetime_count()) .min() } } diff --git a/lib/src/client/session/services/view.rs b/lib/src/client/session/services/view.rs new file mode 100644 index 000000000..6aed0c987 --- /dev/null +++ b/lib/src/client/session/services/view.rs @@ -0,0 +1,232 @@ +use crate::{ + client::{ + session::{ + process_service_result, process_unexpected_response, session_debug, session_error, + }, + Session, + }, + core::supported_message::SupportedMessage, + types::{ + BrowseDescription, BrowseNextRequest, BrowsePath, BrowsePathResult, BrowseRequest, + BrowseResult, ByteString, DateTime, NodeId, RegisterNodesRequest, StatusCode, + TranslateBrowsePathsToNodeIdsRequest, UnregisterNodesRequest, ViewDescription, + }, +}; + +impl Session { + /// Discover the references to the specified nodes by sending a [`BrowseRequest`] to the server. + /// + /// See OPC UA Part 4 - Services 5.8.2 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `nodes_to_browse` - A list of [`BrowseDescription`] describing nodes to browse. + /// + /// # Returns + /// + /// * `Ok(Option)` - A list [`BrowseResult`] corresponding to each node to browse. A browse result + /// may contain a continuation point, for use with `browse_next()`. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub async fn browse( + &self, + nodes_to_browse: &[BrowseDescription], + ) -> Result>, StatusCode> { + if nodes_to_browse.is_empty() { + session_error!(self, "browse, was not supplied with any nodes to browse"); + Err(StatusCode::BadNothingToDo) + } else { + let request = BrowseRequest { + request_header: self.make_request_header(), + view: ViewDescription { + view_id: NodeId::null(), + timestamp: DateTime::null(), + view_version: 0, + }, + requested_max_references_per_node: 1000, + nodes_to_browse: Some(nodes_to_browse.to_vec()), + }; + let response = self.send(request).await?; + if let SupportedMessage::BrowseResponse(response) = response { + session_debug!(self, "browse, success"); + process_service_result(&response.response_header)?; + Ok(response.results) + } else { + session_error!(self, "browse failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + } + + /// Continue to discover references to nodes by sending continuation points in a [`BrowseNextRequest`] + /// to the server. This function may have to be called repeatedly to process the initial query. + /// + /// See OPC UA Part 4 - Services 5.8.3 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `release_continuation_points` - Flag indicating if the continuation points should be released by the server + /// * `continuation_points` - A list of [`BrowseDescription`] continuation points + /// + /// # Returns + /// + /// * `Ok(Option)` - A list [`BrowseResult`] corresponding to each node to browse. A browse result + /// may contain a continuation point, for use with `browse_next()`. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub async fn browse_next( + &self, + release_continuation_points: bool, + continuation_points: &[ByteString], + ) -> Result>, StatusCode> { + if continuation_points.is_empty() { + Err(StatusCode::BadNothingToDo) + } else { + let request = BrowseNextRequest { + request_header: self.make_request_header(), + continuation_points: Some(continuation_points.to_vec()), + release_continuation_points, + }; + let response = self.send(request).await?; + if let SupportedMessage::BrowseNextResponse(response) = response { + session_debug!(self, "browse_next, success"); + process_service_result(&response.response_header)?; + Ok(response.results) + } else { + session_error!(self, "browse_next failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + } + + /// Translate browse paths to NodeIds by sending a [`TranslateBrowsePathsToNodeIdsRequest`] request to the Server + /// Each [`BrowsePath`] is constructed of a starting node and a `RelativePath`. The specified starting node + /// identifies the node from which the RelativePath is based. The RelativePath contains a sequence of + /// ReferenceTypes and BrowseNames. + /// + /// See OPC UA Part 4 - Services 5.8.4 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `browse_paths` - A list of [`BrowsePath`] node + relative path for the server to look up + /// + /// # Returns + /// + /// * `Ok(Vec>)` - List of [`BrowsePathResult`] for the list of browse + /// paths. The size and order of the list matches the size and order of the `browse_paths` + /// parameter. + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub async fn translate_browse_paths_to_node_ids( + &self, + browse_paths: &[BrowsePath], + ) -> Result, StatusCode> { + if browse_paths.is_empty() { + session_error!( + self, + "translate_browse_paths_to_node_ids, was not supplied with any browse paths" + ); + Err(StatusCode::BadNothingToDo) + } else { + let request = TranslateBrowsePathsToNodeIdsRequest { + request_header: self.make_request_header(), + browse_paths: Some(browse_paths.to_vec()), + }; + let response = self.send(request).await?; + if let SupportedMessage::TranslateBrowsePathsToNodeIdsResponse(response) = response { + session_debug!(self, "translate_browse_paths_to_node_ids, success"); + process_service_result(&response.response_header)?; + Ok(response.results.unwrap_or_default()) + } else { + session_error!( + self, + "translate_browse_paths_to_node_ids failed {:?}", + response + ); + Err(process_unexpected_response(response)) + } + } + } + + /// Register nodes on the server by sending a [`RegisterNodesRequest`]. The purpose of this + /// call is server-dependent but allows a client to ask a server to create nodes which are + /// otherwise expensive to set up or maintain, e.g. nodes attached to hardware. + /// + /// See OPC UA Part 4 - Services 5.8.5 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `nodes_to_register` - A list of [`NodeId`] nodes for the server to register + /// + /// # Returns + /// + /// * `Ok(Vec)` - A list of [`NodeId`] corresponding to size and order of the input. The + /// server may return an alias for the input `NodeId` + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub async fn register_nodes( + &self, + nodes_to_register: &[NodeId], + ) -> Result, StatusCode> { + if nodes_to_register.is_empty() { + session_error!( + self, + "register_nodes, was not supplied with any nodes to register" + ); + Err(StatusCode::BadNothingToDo) + } else { + let request = RegisterNodesRequest { + request_header: self.make_request_header(), + nodes_to_register: Some(nodes_to_register.to_vec()), + }; + let response = self.send(request).await?; + if let SupportedMessage::RegisterNodesResponse(response) = response { + session_debug!(self, "register_nodes, success"); + process_service_result(&response.response_header)?; + Ok(response.registered_node_ids.unwrap()) + } else { + session_error!(self, "register_nodes failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + } + + /// Unregister nodes on the server by sending a [`UnregisterNodesRequest`]. This indicates to + /// the server that the client relinquishes any need for these nodes. The server will ignore + /// unregistered nodes. + /// + /// See OPC UA Part 4 - Services 5.8.5 for complete description of the service and error responses. + /// + /// # Arguments + /// + /// * `nodes_to_unregister` - A list of [`NodeId`] nodes for the server to unregister + /// + /// # Returns + /// + /// * `Ok(())` - Request succeeded, server ignores invalid nodes + /// * `Err(StatusCode)` - Request failed, [Status code](StatusCode) is the reason for failure. 
+ /// + pub async fn unregister_nodes(&self, nodes_to_unregister: &[NodeId]) -> Result<(), StatusCode> { + if nodes_to_unregister.is_empty() { + session_error!( + self, + "unregister_nodes, was not supplied with any nodes to unregister" + ); + Err(StatusCode::BadNothingToDo) + } else { + let request = UnregisterNodesRequest { + request_header: self.make_request_header(), + nodes_to_unregister: Some(nodes_to_unregister.to_vec()), + }; + let response = self.send(request).await?; + if let SupportedMessage::UnregisterNodesResponse(response) = response { + session_debug!(self, "unregister_nodes, success"); + process_service_result(&response.response_header)?; + Ok(()) + } else { + session_error!(self, "unregister_nodes failed {:?}", response); + Err(process_unexpected_response(response)) + } + } + } +} diff --git a/lib/src/client/session/session.rs b/lib/src/client/session/session.rs index abdf5e871..9402ac5aa 100644 --- a/lib/src/client/session/session.rs +++ b/lib/src/client/session/session.rs @@ -1,2424 +1,188 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Session functionality for the current open client connection. This module contains functions -//! to call for all typically synchronous operations during an OPC UA session. -//! -//! The session also has async functionality but that is reserved for publish requests on subscriptions -//! and events. 
use std::{ - cmp, - collections::HashSet, - result::Result, - str::FromStr, - sync::{mpsc::SyncSender, Arc}, - thread, + sync::{ + atomic::{AtomicU32, Ordering}, + Arc, + }, + time::{Duration, Instant}, }; -use tokio::{ - sync::oneshot, - time::{interval, Duration, Instant}, -}; +use arc_swap::ArcSwap; use crate::{ client::{ - callbacks::{OnConnectionStatusChange, OnSessionClosed, OnSubscriptionNotification}, - client::IdentityToken, - comms::tcp_transport::TcpTransport, - process_service_result, process_unexpected_response, - session::{ - services::*, - session_debug, session_error, - session_state::{ConnectionState, SessionState}, - session_trace, session_warn, - }, - session_retry_policy::{Answer, SessionRetryPolicy}, - subscription::{self, Subscription}, - subscription_state::SubscriptionState, + retry::SessionRetryPolicy, transport::tcp::TransportConfiguration, AsyncSecureChannel, + ClientConfig, }, - core::{ - comms::{ - secure_channel::{Role, SecureChannel}, - url::*, - }, - supported_message::SupportedMessage, - RUNTIME, - }, - crypto::{ - self as crypto, user_identity::make_user_name_identity_token, CertificateStore, - SecurityPolicy, X509, - }, - deregister_runtime_component, register_runtime_component, - sync::*, - types::{node_ids::ObjectId, status_code::StatusCode, *}, + core::{handle::AtomicHandle, supported_message::SupportedMessage}, + crypto::CertificateStore, + sync::{Mutex, RwLock}, + types::{ApplicationDescription, DecodingOptions, NodeId, RequestHeader, StatusCode, UAString}, }; -/// Information about the server endpoint, security policy, security mode and user identity that the session will -/// will use to establish a connection. 
-#[derive(Debug)] -pub struct SessionInfo { - /// The endpoint - pub endpoint: EndpointDescription, - /// User identity token - pub user_identity_token: IdentityToken, - /// Preferred language locales - pub preferred_locales: Vec, -} +use super::{services::subscriptions::state::SubscriptionState, SessionEventLoop, SessionInfo}; -impl Into for EndpointDescription { - fn into(self) -> SessionInfo { - (self, IdentityToken::Anonymous).into() - } +#[derive(Clone, Copy)] +pub enum SessionState { + Disconnected, + Connected, + Connecting, } -impl Into for (EndpointDescription, IdentityToken) { - fn into(self) -> SessionInfo { - SessionInfo { - endpoint: self.0, - user_identity_token: self.1, - preferred_locales: Vec::new(), - } - } +lazy_static! { + static ref NEXT_SESSION_ID: AtomicU32 = AtomicU32::new(1); } -/// A `Session` runs in a loop, which can be terminated by sending it a `SessionCommand`. -#[derive(Debug)] -pub enum SessionCommand { - /// Stop running as soon as possible - Stop, -} - -/// A session of the client. The session is associated with an endpoint and maintains a state -/// when it is active. The `Session` struct provides functions for all the supported -/// request types in the API. +/// An OPC-UA session. This session provides methods for all supported services that require an open session. /// /// Note that not all servers may support all service requests and calling an unsupported API /// may cause the connection to be dropped. Your client is expected to know the capabilities of /// the server it is calling to avoid this. /// pub struct Session { - /// The client application's name. - application_description: ApplicationDescription, - /// A name for the session, supplied during create - session_name: UAString, - /// The session connection info. - session_info: SessionInfo, - /// Runtime state of the session, reset if disconnected. - session_state: Arc>, - /// Subscriptions state. - subscription_state: Arc>, - /// Transport layer. 
- transport: TcpTransport, - /// Certificate store. - certificate_store: Arc>, - /// Secure channel information. - secure_channel: Arc>, - /// Session retry policy. - session_retry_policy: Arc>, - /// Ignore clock skew between the client and the server. - ignore_clock_skew: bool, - /// Single threaded executor flag (for TCP transport). Unused. - single_threaded_executor: bool, - /// Tokio runtime - runtime: Arc>, -} - -impl Drop for Session { - fn drop(&mut self) { - info!("Session has dropped"); - self.disconnect(); - } + pub(super) channel: AsyncSecureChannel, + pub(super) state_watch_rx: tokio::sync::watch::Receiver, + pub(super) state_watch_tx: tokio::sync::watch::Sender, + pub(super) certificate_store: Arc>, + pub(super) session_id: Arc>, + pub(super) auth_token: Arc>, + pub(super) internal_session_id: AtomicU32, + pub(super) session_info: SessionInfo, + pub(super) session_name: UAString, + pub(super) application_description: ApplicationDescription, + pub(super) request_timeout: Duration, + pub(super) publish_timeout: Duration, + pub(super) recreate_monitored_items_chunk: usize, + pub(super) session_timeout: f64, + pub(super) max_inflight_publish: usize, + pub subscription_state: Mutex, + pub(super) monitored_item_handle: AtomicHandle, + pub(super) trigger_publish_tx: tokio::sync::watch::Sender, } impl Session { - /// Create a new session from the supplied application description, certificate store and session - /// information. - /// - /// # Arguments - /// - /// * `application_description` - information about the client that will be provided to the server - /// * `certificate_store` - certificate management on disk - /// * `session_info` - information required to establish a new session. - /// - /// # Returns - /// - /// * `Session` - the interface that shall be used to communicate between the client and the server. 
- /// - pub(crate) fn new( - application_description: ApplicationDescription, - session_name: T, + pub(crate) fn new( certificate_store: Arc>, session_info: SessionInfo, + session_name: UAString, + application_description: ApplicationDescription, session_retry_policy: SessionRetryPolicy, decoding_options: DecodingOptions, - ignore_clock_skew: bool, - single_threaded_executor: bool, - ) -> Session - where - T: Into, - { - let session_name = session_name.into(); - - let secure_channel = Arc::new(RwLock::new(SecureChannel::new( - certificate_store.clone(), - Role::Client, - decoding_options, - ))); - - let subscription_state = Arc::new(RwLock::new(SubscriptionState::new())); - - let session_state = Arc::new(RwLock::new(SessionState::new( - ignore_clock_skew, - secure_channel.clone(), - subscription_state.clone(), - ))); - - let transport = TcpTransport::new( - secure_channel.clone(), - session_state.clone(), - single_threaded_executor, - ); - - // This runtime is single threaded. The one for the transport may be multi-threaded - let runtime = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .unwrap(); - - Session { - application_description, - session_name, + config: &ClientConfig, + ) -> (Arc, SessionEventLoop) { + let auth_token: Arc> = Default::default(); + let (state_watch_tx, state_watch_rx) = + tokio::sync::watch::channel(SessionState::Disconnected); + let (trigger_publish_tx, trigger_publish_rx) = tokio::sync::watch::channel(Instant::now()); + + let session = Arc::new(Session { + channel: AsyncSecureChannel::new( + certificate_store.clone(), + session_info.clone(), + session_retry_policy.clone(), + decoding_options, + config.performance.ignore_clock_skew, + auth_token.clone(), + TransportConfiguration { + max_pending_incoming: 5, + max_inflight: config.performance.max_inflight_messages, + send_buffer_size: config.decoding_options.max_chunk_size, + recv_buffer_size: config.decoding_options.max_incoming_chunk_size, + max_message_size: 
config.decoding_options.max_message_size, + max_chunk_count: config.decoding_options.max_chunk_count, + }, + ), + internal_session_id: AtomicU32::new(NEXT_SESSION_ID.fetch_add(1, Ordering::Relaxed)), + state_watch_rx, + state_watch_tx, + session_id: Default::default(), session_info, - session_state, + auth_token, + session_name, + application_description, certificate_store, - subscription_state, - transport, - secure_channel, - session_retry_policy: Arc::new(Mutex::new(session_retry_policy)), - ignore_clock_skew, - single_threaded_executor, - runtime: Arc::new(Mutex::new(runtime)), - } - } - - fn reset(&mut self) { - // Clear the existing secure channel state - { - let mut secure_channel = trace_write_lock!(self.secure_channel); - secure_channel.clear_security_token(); - } - - // Create a new session state - self.session_state = Arc::new(RwLock::new(SessionState::new( - self.ignore_clock_skew, - self.secure_channel.clone(), - self.subscription_state.clone(), - ))); - - // Keep the existing transport, we should never drop a tokio runtime from a sync function - } - - /// Connects to the server, creates and activates a session. If there - /// is a failure, it will be communicated by the status code in the result. - /// - /// # Returns - /// - /// * `Ok(())` - connection has happened and the session is activated - /// * `Err(StatusCode)` - reason for failure - /// - pub fn connect_and_activate(&mut self) -> Result<(), StatusCode> { - // Connect now using the session state - self.connect()?; - self.create_session()?; - self.activate_session()?; - Ok(()) - } - - /// Sets the session retry policy that dictates what this session will do if the connection - /// fails or goes down. The retry policy enables the session to retry a connection on an - /// interval up to a maxmimum number of times. 
- /// - /// # Arguments - /// - /// * `session_retry_policy` - the session retry policy to use - /// - pub fn set_session_retry_policy(&mut self, session_retry_policy: SessionRetryPolicy) { - self.session_retry_policy = Arc::new(Mutex::new(session_retry_policy)); - } - - /// Register a callback to be notified when the session has been closed. - /// - /// # Arguments - /// - /// * `session_closed_callback` - the session closed callback - /// - pub fn set_session_closed_callback(&mut self, session_closed_callback: CB) - where - CB: OnSessionClosed + Send + Sync + 'static, - { - let mut session_state = trace_write_lock!(self.session_state); - session_state.set_session_closed_callback(session_closed_callback); - } - - /// Registers a callback to be notified when the session connection status has changed. - /// This will be called if connection status changes from connected to disconnected or vice versa. - /// - /// # Arguments - /// - /// * `connection_status_callback` - the connection status callback. - /// - pub fn set_connection_status_callback(&mut self, connection_status_callback: CB) - where - CB: OnConnectionStatusChange + Send + Sync + 'static, - { - let mut session_state = trace_write_lock!(self.session_state); - session_state.set_connection_status_callback(connection_status_callback); - } - - /// Reconnects to the server and tries to activate the existing session. If there - /// is a failure, it will be communicated by the status code in the result. You should not - /// call this if there is a session retry policy associated with the session. - /// - /// Reconnecting will attempt to transfer or recreate subscriptions that were on the old - /// session before it terminated. 
- /// - /// # Returns - /// - /// * `Ok(())` - reconnection has happened and the session is activated - /// * `Err(StatusCode)` - reason for failure - /// - pub fn reconnect_and_activate(&mut self) -> Result<(), StatusCode> { - // Do nothing if already connected / activated - if self.is_connected() { - session_error!( - self, - "Reconnect is going to do nothing because already connected" - ); - Err(StatusCode::BadUnexpectedError) - } else { - // Reset the session state - self.reset(); - - // Connect to server (again) - self.connect_no_retry()?; - - // Attempt to reactivate the existing session - match self.activate_session() { - Err(status_code) => { - // Activation didn't work, so create a new session - info!("Session activation failed on reconnect, error = {}, so creating a new session", status_code); - { - let mut session_state = trace_write_lock!(self.session_state); - session_state.reset(); - } - - session_debug!(self, "create_session"); - self.create_session()?; - session_debug!(self, "activate_session"); - self.activate_session()?; - session_debug!(self, "reconnect should be complete"); - } - Ok(_) => { - info!("Activation succeeded"); - } - } - session_debug!(self, "transfer_subscriptions_from_old_session"); - self.transfer_subscriptions_from_old_session()?; - Ok(()) - } - } - - /// This code attempts to take the existing subscriptions created by a previous session and - /// either transfer them to this session, or construct them from scratch. - fn transfer_subscriptions_from_old_session(&mut self) -> Result<(), StatusCode> { - let subscription_state = self.subscription_state.clone(); - - let subscription_ids = { - let subscription_state = trace_read_lock!(subscription_state); - subscription_state.subscription_ids() - }; - - // Start by getting the subscription ids - if let Some(subscription_ids) = subscription_ids { - // Try to use TransferSubscriptions to move subscriptions_ids over. If this - // works then there is nothing else to do. 
- let mut subscription_ids_to_recreate = - subscription_ids.iter().copied().collect::>(); - if let Ok(transfer_results) = self.transfer_subscriptions(&subscription_ids, true) { - session_debug!(self, "transfer_results = {:?}", transfer_results); - transfer_results.iter().enumerate().for_each(|(i, r)| { - if r.status_code.is_good() { - // Subscription was transferred so it does not need to be recreated - subscription_ids_to_recreate.remove(&subscription_ids[i]); - } - }); - } - - // But if it didn't work, then some or all subscriptions have to be remade. - if !subscription_ids_to_recreate.is_empty() { - session_warn!(self, "Some or all of the existing subscriptions could not be transferred and must be created manually"); - } - - // Now create any subscriptions that could not be transferred - subscription_ids_to_recreate - .iter() - .for_each(|subscription_id| { - info!("Recreating subscription {}", subscription_id); - // Remove the subscription data, create it again from scratch - let deleted_subscription = { - let mut subscription_state = trace_write_lock!(subscription_state); - subscription_state.delete_subscription(*subscription_id) - }; - - if let Some(subscription) = deleted_subscription { - // Attempt to replicate the subscription (subscription id will be new) - if let Ok(subscription_id) = self.create_subscription_inner( - subscription.publishing_interval(), - subscription.lifetime_count(), - subscription.max_keep_alive_count(), - subscription.max_notifications_per_publish(), - subscription.priority(), - subscription.publishing_enabled(), - subscription.notification_callback(), - ) { - info!("New subscription created with id {}", subscription_id); - - // For each monitored item - let items_to_create = subscription - .monitored_items() - .iter() - .map(|(_, item)| MonitoredItemCreateRequest { - item_to_monitor: item.item_to_monitor().clone(), - monitoring_mode: item.monitoring_mode(), - requested_parameters: MonitoringParameters { - client_handle: 
item.client_handle(), - sampling_interval: item.sampling_interval(), - filter: ExtensionObject::null(), - queue_size: item.queue_size() as u32, - discard_oldest: true, - }, - }) - .collect::>(); - let _ = self.create_monitored_items( - subscription_id, - TimestampsToReturn::Both, - &items_to_create, - ); - - // Recreate any triggers for the monitored item. This code assumes monitored item - // ids are the same value as they were in the previous subscription. - subscription.monitored_items().iter().for_each(|(_, item)| { - let triggered_items = item.triggered_items(); - if !triggered_items.is_empty() { - let links_to_add = - triggered_items.iter().copied().collect::>(); - let _ = self.set_triggering( - subscription_id, - item.id(), - links_to_add.as_slice(), - &[], - ); - } - }); - } else { - session_warn!( - self, - "Could not create a subscription from the existing subscription {}", - subscription_id - ); - } - } else { - panic!( - "Subscription {}, doesn't exist although it should", - subscription_id - ); - } - }); - } - Ok(()) - } - - /// Connects to the server using the retry policy to repeat connecting until such time as it - /// succeeds or the policy says to give up. If there is a failure, it will be - /// communicated by the status code in the result. 
- pub fn connect(&self) -> Result<(), StatusCode> { - loop { - match self.connect_no_retry() { - Ok(_) => { - info!("Connect was successful"); - let mut session_retry_policy = trace_lock!(self.session_retry_policy); - session_retry_policy.reset_retry_count(); - return Ok(()); - } - Err(status_code) => { - self.disconnect(); - let mut session_retry_policy = trace_lock!(self.session_retry_policy); - session_retry_policy.increment_retry_count(); - session_warn!( - self, - "Connect was unsuccessful, error = {}, retries = {}", - status_code, - session_retry_policy.retry_count() - ); - - match session_retry_policy.should_retry_connect(DateTime::now()) { - Answer::GiveUp => { - session_error!(self, "Session has given up trying to connect to the server after {} retries", session_retry_policy.retry_count()); - return Err(StatusCode::BadNotConnected); - } - Answer::Retry => { - info!("Retrying to connect to server..."); - session_retry_policy.set_last_attempt(DateTime::now()); - } - Answer::WaitFor(sleep_for) => { - // Sleep for the instructed interval before looping around and trying - // once more. - thread::sleep(Duration::from_millis(sleep_for as u64)); - } - } - } - } - } - } - - /// Connects to the server using the configured session arguments. No attempt is made to retry - /// the connection if the attempt fails. If there is a failure, it will be communicated by the - /// status code in the result. 
- /// - /// # Returns - /// - /// * `Ok(())` - connection has happened - /// * `Err(StatusCode)` - reason for failure - /// - pub fn connect_no_retry(&self) -> Result<(), StatusCode> { - let endpoint_url = self.session_info.endpoint.endpoint_url.clone(); - info!("Connect"); - let security_policy = - SecurityPolicy::from_str(self.session_info.endpoint.security_policy_uri.as_ref()) - .unwrap(); - if security_policy == SecurityPolicy::Unknown { - session_error!( - self, - "connect, security policy \"{}\" is unknown", - self.session_info.endpoint.security_policy_uri.as_ref() - ); - Err(StatusCode::BadSecurityPolicyRejected) - } else { - let (cert, key) = { - let certificate_store = trace_write_lock!(self.certificate_store); - certificate_store.read_own_cert_and_pkey_optional() - }; - - { - let mut secure_channel = trace_write_lock!(self.secure_channel); - secure_channel.set_private_key(key); - secure_channel.set_cert(cert); - secure_channel.set_security_policy(security_policy); - secure_channel.set_security_mode(self.session_info.endpoint.security_mode); - let _ = secure_channel.set_remote_cert_from_byte_string( - &self.session_info.endpoint.server_certificate, - ); - info!("Security policy = {:?}", security_policy); - info!( - "Security mode = {:?}", - self.session_info.endpoint.security_mode - ); - } - - // Transport's tokio runtime is made here, not in transport - self.transport.connect(endpoint_url.as_ref())?; - self.open_secure_channel()?; - self.on_connection_status_change(true); - Ok(()) - } - } - - pub(crate) fn session_state(&self) -> Arc> { - self.session_state.clone() - } - - /// Disconnect from the server. Disconnect is an explicit command to drop the socket and throw - /// away all state information. If you disconnect you cannot reconnect to your existing session - /// or retrieve any existing subscriptions. 
- pub fn disconnect(&self) { - if self.is_connected() { - let _ = self.close_session_and_delete_subscriptions(); - let _ = self.close_secure_channel(); - - { - let session_state = trace_read_lock!(self.session_state); - session_state.quit(); - } - - self.transport.wait_for_disconnect(); - self.on_connection_status_change(false); - } - } - - /// Test if the session is in a connected state - /// - /// # Returns - /// - /// * `true` - Session is connected - /// * `false` - Session is not connected - /// - pub fn is_connected(&self) -> bool { - self.transport.is_connected() - } - - /// Internal constant for the sleep interval used during polling - const POLL_SLEEP_INTERVAL: u64 = 10; - - /// Synchronously runs a polling loop over the supplied session. Running a session performs - /// periodic actions such as receiving messages, processing subscriptions, and recovering from - /// connection errors. The run function will return if the session is disconnected and - /// cannot be reestablished. - /// - /// # Arguments - /// - /// * `session` - the session to run ynchronously - /// - pub fn run(session: Arc>) { - let (_tx, rx) = oneshot::channel(); - Self::run_loop(session, Self::POLL_SLEEP_INTERVAL, rx); - } - - /// Runs the session asynchronously on a new thread. The function returns immediately - /// and gives a caller a `Sender` that can be used to send a message to the session - /// to cause it to terminate. Do not drop this sender (i.e. make sure to bind it to a variable with - /// sufficient lifetime) or the session will terminate as soon as you do. - /// - /// Running a session performs periodic actions such as receiving messages, processing subscriptions, - /// and recovering from. connection errors. The session will terminate by itself if it is disconnected - /// and cannot be reestablished. It will terminate if the sender is dropped or if sent a ClientCommand - /// to terminate. 
caller to this function can monitor the status of the session through state - /// calls to know when this happens. - /// - /// - /// # Arguments - /// - /// * `session` - the session to run asynchronously - /// - /// # Returns - /// - /// * `oneshot::Sender` - A sender that allows the caller to send a message to the - /// run loop to cause it to stop. Note that dropping the sender, i.e. not binding it to - /// a variable will also cause the loop to stop. - /// - pub fn run_async(session: Arc>) -> oneshot::Sender { - let (tx, rx) = oneshot::channel(); - thread::spawn(move || Self::run_loop(session, Self::POLL_SLEEP_INTERVAL, rx)); - tx - } + request_timeout: config.request_timeout, + session_timeout: config.session_timeout as f64, + publish_timeout: config.publish_timeout, + max_inflight_publish: config.max_inflight_publish, + recreate_monitored_items_chunk: config.performance.recreate_monitored_items_chunk, + subscription_state: Mutex::new(SubscriptionState::new(config.min_publish_interval)), + monitored_item_handle: AtomicHandle::new(1000), + trigger_publish_tx, + }); - /// The asynchronous main session loop. This is the function that processes responses and - /// keeps the session alive. Note that while the client normally calls `run()` or `run_loop()` - /// to invoke this, there may be situations where the client wishes to directly use this - /// function, for example if the client has its own Tokio runtime and prefers to spawn the task - /// with that. - pub async fn session_task( - session: Arc>, - sleep_interval: u64, - rx: oneshot::Receiver, - ) { - tokio::select! { - _ = async { - let mut timer = interval(Duration::from_millis(sleep_interval)); - loop { - // Poll the session. 
- let poll_result = { - let mut session = session.write(); - session.poll().await - }; - match poll_result { - Ok(did_something) => { - // If the session did nothing, then sleep for a moment to save some CPU - if !did_something { - timer.tick().await; - } - } - Err(_) => { - // Break the loop if connection goes down - info!("Run session connection to server broke, so terminating"); - break; - } - } - } - } => {} - message = rx => { - if let Ok(message) = message { - // Only message is a Quit command so no point even testing what it is. - info!("Run session was terminated by a message {:?}", message); - } - else { - warn!("Run session was terminated, presumably by caller dropping oneshot sender. Don't do that unless you meant to."); - } - } - } + ( + session.clone(), + SessionEventLoop::new( + session, + session_retry_policy, + trigger_publish_rx, + config.keep_alive_interval, + ), + ) } - /// The main running loop for a session. This is used by `run()` and `run_async()` to run - /// continuously until a signal is received to terminate. - /// - /// # Arguments + /// Send a message and wait for response, using the default configured timeout. /// - /// * `session` - The session - /// * `sleep_interval` - An internal polling timer in ms - /// * `rx` - A receiver that the task uses to receive a quit command directly from the caller. - /// - pub fn run_loop( - session: Arc>, - sleep_interval: u64, - rx: oneshot::Receiver, - ) { - let task = { - let session = session.clone(); - async move { - Self::session_task(session, sleep_interval, rx).await; - } - }; - // Spawn the task on the alloted runtime - let runtime = { - let session = trace_read_lock!(session); - session.runtime.clone() - }; - let runtime = trace_lock!(runtime); - runtime.block_on(task); + /// In order to set a different timeout, call `send` on the inner channel instead. 
+ pub(super) async fn send( + &self, + request: impl Into, + ) -> Result { + self.channel.send(request, self.request_timeout).await } - /// Polls on the session which basically dispatches any pending - /// async responses, attempts to reconnect if the client is disconnected from the client and - /// sleeps a little bit if nothing needed to be done. - /// - /// # Arguments - /// - /// * `sleep_for` - the period of time in milliseconds that poll should sleep for if it performed - /// no action. - /// - /// # Returns - /// - /// * `true` - if an action was performed during the poll - /// * `false` - if no action was performed during the poll and the poll slept - /// - pub async fn poll(&mut self) -> Result { - let did_something = if self.is_connected() { - let mut session_state = trace_write_lock!(self.session_state); - session_state.handle_publish_responses() - } else { - let should_retry_connect = { - let session_retry_policy = trace_lock!(self.session_retry_policy); - session_retry_policy.should_retry_connect(DateTime::now()) - }; - match should_retry_connect { - Answer::GiveUp => { - let session_retry_policy = trace_lock!(self.session_retry_policy); - session_error!( - self, - "Session has given up trying to reconnect to the server after {} retries", - session_retry_policy.retry_count() - ); - return Err(()); - } - Answer::Retry => { - info!("Retrying to reconnect to server..."); - { - let mut session_retry_policy = trace_lock!(self.session_retry_policy); - session_retry_policy.set_last_attempt(DateTime::now()); - } - if self.reconnect_and_activate().is_ok() { - info!("Retry to connect was successful"); - let mut session_retry_policy = trace_lock!(self.session_retry_policy); - session_retry_policy.reset_retry_count(); - } else { - let mut session_retry_policy = trace_lock!(self.session_retry_policy); - session_retry_policy.increment_retry_count(); - session_warn!( - self, - "Reconnect was unsuccessful, retries = {}", - session_retry_policy.retry_count() - ); - 
drop(session_retry_policy); - self.disconnect(); - } - true - } - Answer::WaitFor(_) => { - // Note we could sleep for the interval in the WaitFor(), but the poll() sleeps - // anyway so it probably makes no odds. - false - } - } - }; - Ok(did_something) + /// Create a request header with the default timeout. + pub(super) fn make_request_header(&self) -> RequestHeader { + self.channel.make_request_header(self.request_timeout) } - /// Start a task that will periodically "ping" the server to keep the session alive. The ping rate - /// will be 3/4 the session timeout rate. - /// - /// NOTE: This code assumes that the session_timeout period never changes, e.g. if you - /// connected to a server, negotiate a timeout period and then for whatever reason need to - /// reconnect to that same server, you will receive the same timeout. If you get a different - /// timeout then this code will not care and will continue to ping at the original rate. - fn spawn_session_activity_task(&self, session_timeout: f64) { - session_debug!(self, "spawn_session_activity_task({})", session_timeout); - - let connection_state = { - let session_state = trace_read_lock!(self.session_state); - session_state.connection_state() - }; - - let session_state = self.session_state.clone(); - - // Session activity will happen every 3/4 of the timeout period - const MIN_SESSION_ACTIVITY_MS: u64 = 1000; - let session_activity = cmp::max((session_timeout as u64 / 4) * 3, MIN_SESSION_ACTIVITY_MS); - session_debug!( - self, - "session timeout is {}, activity timer is {}", - session_timeout, - session_activity + /// Reset the session after a hard disconnect, clearing the session ID and incrementing the internal + /// session counter. 
+ pub(crate) fn reset(&self) { + self.session_id.store(Arc::new(NodeId::null())); + self.internal_session_id.store( + NEXT_SESSION_ID.fetch_add(1, Ordering::Relaxed), + Ordering::Relaxed, ); - - let id = format!("session-activity-thread-{:?}", thread::current().id()); - let runtime = trace_lock!(self.runtime); - runtime.spawn(async move { - register_runtime_component!(&id); - // The timer runs at a higher frequency timer loop to terminate as soon after the session - // state has terminated. Each time it runs it will test if the interval has elapsed or not. - let session_activity_interval = Duration::from_millis(session_activity); - let mut timer = interval(Duration::from_millis(MIN_SESSION_ACTIVITY_MS)); - let mut last_timeout = Instant::now(); - - loop { - timer.tick().await; - - if connection_state.is_finished() { - info!("Session activity timer is terminating"); - break; - } - - // Get the time now - let now = Instant::now(); - - // Calculate to interval since last check - let interval = now - last_timeout; - if interval > session_activity_interval { - match connection_state.state() { - ConnectionState::Processing => { - info!("Session activity keep-alive request"); - let mut session_state = trace_write_lock!(session_state); - let request_header = session_state.make_request_header(); - let request = ReadRequest { - request_header, - max_age: 1f64, - timestamps_to_return: TimestampsToReturn::Server, - nodes_to_read: Some(vec![]), - }; - // The response to this is ignored - let _ = session_state.async_send_request(request, None); - } - connection_state => { - info!("Session activity keep-alive is doing nothing - connection state = {:?}", connection_state); - } - }; - last_timeout = now; - } - } - - info!("Session activity timer task is finished"); - deregister_runtime_component!(&id); - }); } - /// Start a task that will periodically send a publish request to keep the subscriptions alive. 
- /// The request rate will be 3/4 of the shortest (revised publishing interval * the revised keep - /// alive count) of all subscriptions that belong to a single session. - fn spawn_subscription_activity_task(&self) { - session_debug!(self, "spawn_subscription_activity_task",); - - let connection_state = { - let session_state = trace_read_lock!(self.session_state); - session_state.connection_state() - }; - - const MIN_SUBSCRIPTION_ACTIVITY_MS: u64 = 1000; - let session_state = self.session_state.clone(); - let subscription_state = self.subscription_state.clone(); - - let id = format!("subscription-activity-thread-{:?}", thread::current().id()); - let runtime = trace_lock!(self.runtime); - runtime.spawn(async move { - register_runtime_component!(&id); - - // The timer runs at a higher frequency timer loop to terminate as soon after the session - // state has terminated. Each time it runs it will test if the interval has elapsed or not. - let mut timer = interval(Duration::from_millis(MIN_SUBSCRIPTION_ACTIVITY_MS)); - - let mut last_timeout: Instant; - let mut subscription_activity_interval: Duration; - - loop { - timer.tick().await; - - if connection_state.is_finished() { - info!("Session activity timer is terminating"); - break; - } - - if let (Some(keep_alive_timeout), last_publish_request) = { - let subscription_state = trace_read_lock!(subscription_state); - ( - subscription_state.keep_alive_timeout(), - subscription_state.last_publish_request(), - ) - } { - subscription_activity_interval = - Duration::from_millis((keep_alive_timeout / 4) * 3); - last_timeout = last_publish_request; + /// Wait for the session to be in either a connected or disconnected state. 
+ async fn wait_for_state(&self, connected: bool) -> bool { + let mut rx = self.state_watch_rx.clone(); - // Get the time now - let now = Instant::now(); - - // Calculate to interval since last check - let interval = now - last_timeout; - if interval > subscription_activity_interval { - let mut session_state = trace_write_lock!(session_state); - let _ = session_state.async_publish(); - } - } - } - - info!("Subscription activity timer task is finished"); - deregister_runtime_component!(&id); - }); - } - - /// This is the internal handler for create subscription that receives the callback wrapped up and reference counted. - fn create_subscription_inner( - &self, - publishing_interval: f64, - lifetime_count: u32, - max_keep_alive_count: u32, - max_notifications_per_publish: u32, - priority: u8, - publishing_enabled: bool, - callback: Arc>, - ) -> Result { - let request = CreateSubscriptionRequest { - request_header: self.make_request_header(), - requested_publishing_interval: publishing_interval, - requested_lifetime_count: lifetime_count, - requested_max_keep_alive_count: max_keep_alive_count, - max_notifications_per_publish, - publishing_enabled, - priority, + let res = match rx + .wait_for(|s| { + connected && matches!(*s, SessionState::Connected) + || !connected && matches!(*s, SessionState::Disconnected) + }) + .await + { + Ok(_) => true, + Err(_) => false, }; - let response = self.send_request(request)?; - if let SupportedMessage::CreateSubscriptionResponse(response) = response { - process_service_result(&response.response_header)?; - let subscription = Subscription::new( - response.subscription_id, - response.revised_publishing_interval, - response.revised_lifetime_count, - response.revised_max_keep_alive_count, - max_notifications_per_publish, - publishing_enabled, - priority, - callback, - ); - - // Add the new subscription to the subscription state - { - let mut subscription_state = trace_write_lock!(self.subscription_state); - 
subscription_state.add_subscription(subscription); - } - - // Send an async publish request for this new subscription - { - let mut session_state = trace_write_lock!(self.session_state); - let _ = session_state.async_publish(); - } - session_debug!( - self, - "create_subscription, created a subscription with id {}", - response.subscription_id - ); - Ok(response.subscription_id) - } else { - session_error!(self, "create_subscription failed {:?}", response); - Err(process_unexpected_response(response)) - } + res } - /// Deletes all subscriptions by sending a [`DeleteSubscriptionsRequest`] to the server with - /// ids for all subscriptions. - /// - /// # Returns - /// - /// * `Ok(Vec<(u32, StatusCode)>)` - List of (id, status code) result for delete action on each id, `Good` or `BadSubscriptionIdInvalid` - /// * `Err(StatusCode)` - Status code reason for failure - /// - /// [`DeleteSubscriptionsRequest`]: ./struct.DeleteSubscriptionsRequest.html - /// - pub fn delete_all_subscriptions(&self) -> Result, StatusCode> { - let subscription_ids = { - let subscription_state = trace_read_lock!(self.subscription_state); - subscription_state.subscription_ids() - }; - if let Some(ref subscription_ids) = subscription_ids { - let status_codes = self.delete_subscriptions(subscription_ids.as_slice())?; - // Return a list of (id, status_code) for each subscription - Ok(subscription_ids - .iter() - .zip(status_codes) - .map(|(id, status_code)| (*id, status_code)) - .collect()) - } else { - // No subscriptions - session_trace!( - self, - "delete_all_subscriptions, called when there are no subscriptions" - ); - Err(StatusCode::BadNothingToDo) - } + /// The internal ID of the session, used to keep track of multiple sessions in the same program. 
+ pub fn session_id(&self) -> u32 { + self.internal_session_id.load(Ordering::Relaxed) } - /// Closes the session and deletes all subscriptions - /// - /// # Returns - /// - /// * `Ok(())` - if the session was closed - /// * `Err(StatusCode)` - Status code reason for failure + /// Convenience method to wait for a connection to the server. /// - /// [`CloseSessionRequest`]: ./struct.CloseSessionRequest.html - /// - pub fn close_session_and_delete_subscriptions(&self) -> Result<(), StatusCode> { - if !self.is_connected() { - return Err(StatusCode::BadNotConnected); - } - // for some operations like enumerating endpoints, there is no session equivalent - // on the server and it's a local helper object, only. In that case: nothing to do. - if trace_read_lock!(self.session_state).session_id().identifier == Identifier::Numeric(0) { - return Ok(()); - } - let request = CloseSessionRequest { - delete_subscriptions: true, - request_header: self.make_request_header(), - }; - let response = self.send_request(request)?; - if let SupportedMessage::CloseSessionResponse(_) = response { - let mut subscription_state = trace_write_lock!(self.subscription_state); - if let Some(subscription_ids) = subscription_state.subscription_ids() { - for subscription_id in subscription_ids { - subscription_state.delete_subscription(subscription_id); - } - } - Ok(()) - } else { - session_error!(self, "close_session failed {:?}", response); - Err(process_unexpected_response(response)) - } + /// You should also monitor the session event loop. If it ends, this method will never return. + pub async fn wait_for_connection(&self) -> bool { + self.wait_for_state(true).await } - /// Returns the subscription state object - pub fn subscription_state(&self) -> Arc> { - self.subscription_state.clone() - } + /// Disconnect from the server and wait until disconnected. 
+ pub async fn disconnect(&self) -> Result<(), StatusCode> { + self.close_session().await?; + self.channel.close_channel().await; - /// Returns a string identifier for the session - pub(crate) fn session_id(&self) -> String { - let session_state = self.session_state(); - let session_state = session_state.read(); - format!("session:{}", session_state.id()) - } + self.wait_for_state(false).await; - /// Notify any callback of the connection status change - fn on_connection_status_change(&self, connected: bool) { - let mut session_state = trace_write_lock!(self.session_state); - session_state.on_connection_status_change(connected); - } - - /// Returns the security policy - fn security_policy(&self) -> SecurityPolicy { - let secure_channel = trace_read_lock!(self.secure_channel); - secure_channel.security_policy() - } - - // Test if the subscription by id exists - fn subscription_exists(&self, subscription_id: u32) -> bool { - let subscription_state = trace_read_lock!(self.subscription_state); - subscription_state.subscription_exists(subscription_id) - } - - // Creates a user identity token according to the endpoint, policy that the client is currently connected to the - // server with. 
- fn user_identity_token( - &self, - server_cert: &Option, - server_nonce: &[u8], - ) -> Result<(ExtensionObject, SignatureData), StatusCode> { - let user_identity_token = &self.session_info.user_identity_token; - let user_token_type = match user_identity_token { - IdentityToken::Anonymous => UserTokenType::Anonymous, - IdentityToken::UserName(_, _) => UserTokenType::UserName, - IdentityToken::X509(_, _) => UserTokenType::Certificate, - }; - - let endpoint = &self.session_info.endpoint; - let policy = endpoint.find_policy(user_token_type); - session_debug!(self, "Endpoint policy = {:?}", policy); - - // Return the result - match policy { - None => { - session_error!( - self, - "Cannot find user token type {:?} for this endpoint, cannot connect", - user_token_type - ); - Err(StatusCode::BadSecurityPolicyRejected) - } - Some(policy) => { - let security_policy = if policy.security_policy_uri.is_null() { - // Assume None - SecurityPolicy::None - } else { - SecurityPolicy::from_uri(policy.security_policy_uri.as_ref()) - }; - if security_policy == SecurityPolicy::Unknown { - session_error!( - self, - "Can't support the security policy {}", - policy.security_policy_uri - ); - Err(StatusCode::BadSecurityPolicyRejected) - } else { - match user_identity_token { - IdentityToken::Anonymous => { - let identity_token = AnonymousIdentityToken { - policy_id: policy.policy_id.clone(), - }; - let identity_token = ExtensionObject::from_encodable( - ObjectId::AnonymousIdentityToken_Encoding_DefaultBinary, - &identity_token, - ); - Ok((identity_token, SignatureData::null())) - } - IdentityToken::UserName(ref user, ref pass) => { - let secure_channel = trace_read_lock!(self.secure_channel); - let identity_token = self.make_user_name_identity_token( - &secure_channel, - policy, - user, - pass, - )?; - let identity_token = ExtensionObject::from_encodable( - ObjectId::UserNameIdentityToken_Encoding_DefaultBinary, - &identity_token, - ); - Ok((identity_token, SignatureData::null())) - } - 
IdentityToken::X509(ref cert_path, ref private_key_path) => { - if let Some(ref server_cert) = server_cert { - // The cert will be supplied to the server along with a signature to prove we have the private key to go with the cert - let certificate_data = CertificateStore::read_cert(cert_path) - .map_err(|e| { - session_error!( - self, - "Certificate cannot be loaded from path {}, error = {}", - cert_path.to_str().unwrap(), - e - ); - StatusCode::BadSecurityPolicyRejected - })?; - let private_key = CertificateStore::read_pkey(private_key_path) - .map_err(|e| { - session_error!( - self, - "Private key cannot be loaded from path {}, error = {}", - private_key_path.to_str().unwrap(), - e - ); - StatusCode::BadSecurityPolicyRejected - })?; - - // Create a signature using the X509 private key to sign the server's cert and nonce - let user_token_signature = crypto::create_signature_data( - &private_key, - security_policy, - &server_cert.as_byte_string(), - &ByteString::from(server_nonce), - )?; - - // Create identity token - let identity_token = X509IdentityToken { - policy_id: policy.policy_id.clone(), - certificate_data: certificate_data.as_byte_string(), - }; - let identity_token = ExtensionObject::from_encodable( - ObjectId::X509IdentityToken_Encoding_DefaultBinary, - &identity_token, - ); - - Ok((identity_token, user_token_signature)) - } else { - session_error!(self, "Cannot create an X509IdentityToken because the remote server has no cert with which to create a signature"); - Err(StatusCode::BadCertificateInvalid) - } - } - } - } - } - } - } - - /// Create a filled in UserNameIdentityToken by using the endpoint's token policy, the current - /// secure channel information and the user name and password. 
- fn make_user_name_identity_token( - &self, - secure_channel: &SecureChannel, - user_token_policy: &UserTokenPolicy, - user: &str, - pass: &str, - ) -> Result { - let channel_security_policy = secure_channel.security_policy(); - let nonce = secure_channel.remote_nonce(); - let cert = secure_channel.remote_cert(); - make_user_name_identity_token( - channel_security_policy, - user_token_policy, - nonce, - &cert, - user, - pass, - ) - } -} - -impl Service for Session { - /// Construct a request header for the session. All requests after create session are expected - /// to supply an authentication token. - fn make_request_header(&self) -> RequestHeader { - let mut session_state = trace_write_lock!(self.session_state); - session_state.make_request_header() - } - - /// Synchronously sends a request. The return value is the response to the request - fn send_request(&self, request: T) -> Result - where - T: Into, - { - let mut session_state = trace_write_lock!(self.session_state); - session_state.send_request(request) - } - - // Asynchronously sends a request. 
The return value is the request handle of the request - fn async_send_request( - &self, - request: T, - sender: Option>, - ) -> Result - where - T: Into, - { - let mut session_state = trace_write_lock!(self.session_state); - session_state.async_send_request(request, sender) - } -} - -impl DiscoveryService for Session { - fn find_servers(&self, endpoint_url: T) -> Result, StatusCode> - where - T: Into, - { - let request = FindServersRequest { - request_header: self.make_request_header(), - endpoint_url: endpoint_url.into(), - locale_ids: None, - server_uris: None, - }; - let response = self.send_request(request)?; - if let SupportedMessage::FindServersResponse(response) = response { - process_service_result(&response.response_header)?; - let servers = if let Some(servers) = response.servers { - servers - } else { - Vec::new() - }; - Ok(servers) - } else { - Err(process_unexpected_response(response)) - } - } - - fn get_endpoints(&self) -> Result, StatusCode> { - session_debug!(self, "get_endpoints"); - let endpoint_url = self.session_info.endpoint.endpoint_url.clone(); - - let request = GetEndpointsRequest { - request_header: self.make_request_header(), - endpoint_url, - locale_ids: None, - profile_uris: None, - }; - - let response = self.send_request(request)?; - if let SupportedMessage::GetEndpointsResponse(response) = response { - process_service_result(&response.response_header)?; - match response.endpoints { - None => { - session_debug!(self, "get_endpoints, success but no endpoints"); - Ok(Vec::new()) - } - Some(endpoints) => { - session_debug!(self, "get_endpoints, success"); - Ok(endpoints) - } - } - } else { - session_error!(self, "get_endpoints failed {:?}", response); - Err(process_unexpected_response(response)) - } - } - - fn register_server(&self, server: RegisteredServer) -> Result<(), StatusCode> { - let request = RegisterServerRequest { - request_header: self.make_request_header(), - server, - }; - let response = self.send_request(request)?; - if let 
SupportedMessage::RegisterServerResponse(response) = response { - process_service_result(&response.response_header)?; - Ok(()) - } else { - Err(process_unexpected_response(response)) - } - } -} - -impl SecureChannelService for Session { - fn open_secure_channel(&self) -> Result<(), StatusCode> { - session_debug!(self, "open_secure_channel"); - let mut session_state = trace_write_lock!(self.session_state); - session_state.issue_or_renew_secure_channel(SecurityTokenRequestType::Issue) - } - - fn close_secure_channel(&self) -> Result<(), StatusCode> { - let request = CloseSecureChannelRequest { - request_header: self.make_request_header(), - }; - // We do not wait for a response because there may not be one. Just return - let _ = self.async_send_request(request, None); Ok(()) } } - -impl SessionService for Session { - fn create_session(&self) -> Result { - // Get some state stuff - let endpoint_url = self.session_info.endpoint.endpoint_url.clone(); - - let client_nonce = { - let secure_channel = trace_read_lock!(self.secure_channel); - secure_channel.local_nonce_as_byte_string() - }; - - let server_uri = UAString::null(); - let session_name = self.session_name.clone(); - - let (client_certificate, _) = { - let certificate_store = trace_write_lock!(self.certificate_store); - certificate_store.read_own_cert_and_pkey_optional() - }; - - // Security - let client_certificate = if let Some(ref client_certificate) = client_certificate { - client_certificate.as_byte_string() - } else { - ByteString::null() - }; - - // Requested session timeout should be larger than your expected subscription rate. 
- let requested_session_timeout = { - let session_retry_policy = trace_lock!(self.session_retry_policy); - session_retry_policy.session_timeout() - }; - - let request = CreateSessionRequest { - request_header: self.make_request_header(), - client_description: self.application_description.clone(), - server_uri, - endpoint_url, - session_name, - client_nonce, - client_certificate, - requested_session_timeout, - max_response_message_size: 0, - }; - - session_debug!(self, "CreateSessionRequest = {:?}", request); - - let response = self.send_request(request)?; - if let SupportedMessage::CreateSessionResponse(response) = response { - process_service_result(&response.response_header)?; - - let session_id = { - let mut session_state = trace_write_lock!(self.session_state); - session_state.set_session_id(response.session_id.clone()); - session_state.set_authentication_token(response.authentication_token.clone()); - { - let mut secure_channel = trace_write_lock!(self.secure_channel); - let _ = - secure_channel.set_remote_nonce_from_byte_string(&response.server_nonce); - let _ = secure_channel - .set_remote_cert_from_byte_string(&response.server_certificate); - } - // When ignoring clock skew, we calculate the time offset between the client - // and the server and use that to compensate for the difference in time. - if self.ignore_clock_skew && !response.response_header.timestamp.is_null() { - let offset = response.response_header.timestamp - DateTime::now(); - // Update the client offset by adding the new offset. 
- session_state.set_client_offset(offset); - } - session_state.session_id() - }; - - // session_debug!(self, "Server nonce is {:?}", response.server_nonce); - - // The server certificate is validated if the policy requires it - let security_policy = self.security_policy(); - let cert_status_code = if security_policy != SecurityPolicy::None { - if let Ok(server_certificate) = - crypto::X509::from_byte_string(&response.server_certificate) - { - // Validate server certificate against hostname and application_uri - let hostname = - hostname_from_url(self.session_info.endpoint.endpoint_url.as_ref()) - .map_err(|_| StatusCode::BadUnexpectedError)?; - let application_uri = - self.session_info.endpoint.server.application_uri.as_ref(); - - let certificate_store = trace_write_lock!(self.certificate_store); - let result = certificate_store.validate_or_reject_application_instance_cert( - &server_certificate, - security_policy, - Some(&hostname), - Some(application_uri), - ); - if result.is_bad() { - result - } else { - StatusCode::Good - } - } else { - session_error!(self, "Server did not supply a valid X509 certificate"); - StatusCode::BadCertificateInvalid - } - } else { - StatusCode::Good - }; - - if !cert_status_code.is_good() { - session_error!(self, "Server's certificate was rejected"); - Err(cert_status_code) - } else { - // Spawn a task to ping the server to keep the connection alive before the session - // timeout period. - session_debug!( - self, - "Revised session timeout is {}", - response.revised_session_timeout - ); - self.spawn_session_activity_task(response.revised_session_timeout); - self.spawn_subscription_activity_task(); - - // TODO Verify signature using server's public key (from endpoint) comparing with data made from client certificate and nonce. 
- // crypto::verify_signature_data(verification_key, security_policy, server_certificate, client_certificate, client_nonce); - Ok(session_id) - } - } else { - Err(process_unexpected_response(response)) - } - } - - fn activate_session(&self) -> Result<(), StatusCode> { - let (user_identity_token, user_token_signature) = { - let secure_channel = trace_read_lock!(self.secure_channel); - self.user_identity_token(&secure_channel.remote_cert(), secure_channel.remote_nonce())? - }; - - let locale_ids = if self.session_info.preferred_locales.is_empty() { - None - } else { - let locale_ids = self - .session_info - .preferred_locales - .iter() - .map(UAString::from) - .collect(); - Some(locale_ids) - }; - - let security_policy = self.security_policy(); - let client_signature = match security_policy { - SecurityPolicy::None => SignatureData::null(), - _ => { - let secure_channel = trace_read_lock!(self.secure_channel); - let server_cert = secure_channel.remote_cert(); - let server_nonce = secure_channel.remote_nonce(); - - let (_, client_pkey) = { - let certificate_store = trace_write_lock!(self.certificate_store); - certificate_store.read_own_cert_and_pkey_optional() - }; - - // Create a signature data - if client_pkey.is_none() { - session_error!(self, "Cannot create client signature - no pkey!"); - return Err(StatusCode::BadUnexpectedError); - } else if server_cert.is_none() { - session_error!( - self, - "Cannot sign server certificate because server cert is null" - ); - return Err(StatusCode::BadUnexpectedError); - } else if server_nonce.is_empty() { - session_error!( - self, - "Cannot sign server certificate because server nonce is empty" - ); - return Err(StatusCode::BadUnexpectedError); - } - - let server_cert = secure_channel - .remote_cert() - .as_ref() - .unwrap() - .as_byte_string(); - let server_nonce = ByteString::from(secure_channel.remote_nonce()); - let signing_key = client_pkey.as_ref().unwrap(); - crypto::create_signature_data( - signing_key, - 
security_policy, - &server_cert, - &server_nonce, - )? - } - }; - - let client_software_certificates = None; - - let request = ActivateSessionRequest { - request_header: self.make_request_header(), - client_signature, - client_software_certificates, - locale_ids, - user_identity_token, - user_token_signature, - }; - - // trace!("ActivateSessionRequest = {:#?}", request); - - let response = self.send_request(request)?; - if let SupportedMessage::ActivateSessionResponse(response) = response { - // trace!("ActivateSessionResponse = {:#?}", response); - process_service_result(&response.response_header)?; - Ok(()) - } else { - Err(process_unexpected_response(response)) - } - } - - fn cancel(&self, request_handle: IntegerId) -> Result { - let request = CancelRequest { - request_header: self.make_request_header(), - request_handle, - }; - let response = self.send_request(request)?; - if let SupportedMessage::CancelResponse(response) = response { - process_service_result(&response.response_header)?; - Ok(response.cancel_count) - } else { - Err(process_unexpected_response(response)) - } - } -} - -impl SubscriptionService for Session { - fn create_subscription( - &self, - publishing_interval: f64, - lifetime_count: u32, - max_keep_alive_count: u32, - max_notifications_per_publish: u32, - priority: u8, - publishing_enabled: bool, - callback: CB, - ) -> Result - where - CB: OnSubscriptionNotification + Send + Sync + 'static, - { - self.create_subscription_inner( - publishing_interval, - lifetime_count, - max_keep_alive_count, - max_notifications_per_publish, - priority, - publishing_enabled, - Arc::new(Mutex::new(callback)), - ) - } - - fn modify_subscription( - &self, - subscription_id: u32, - publishing_interval: f64, - lifetime_count: u32, - max_keep_alive_count: u32, - max_notifications_per_publish: u32, - priority: u8, - ) -> Result<(), StatusCode> { - if subscription_id == 0 { - session_error!(self, "modify_subscription, subscription id must be non-zero, or the 
subscription is considered invalid"); - Err(StatusCode::BadInvalidArgument) - } else if !self.subscription_exists(subscription_id) { - session_error!(self, "modify_subscription, subscription id does not exist"); - Err(StatusCode::BadInvalidArgument) - } else { - let request = ModifySubscriptionRequest { - request_header: self.make_request_header(), - subscription_id, - requested_publishing_interval: publishing_interval, - requested_lifetime_count: lifetime_count, - requested_max_keep_alive_count: max_keep_alive_count, - max_notifications_per_publish, - priority, - }; - let response = self.send_request(request)?; - if let SupportedMessage::ModifySubscriptionResponse(response) = response { - process_service_result(&response.response_header)?; - let mut subscription_state = trace_write_lock!(self.subscription_state); - subscription_state.modify_subscription( - subscription_id, - response.revised_publishing_interval, - response.revised_lifetime_count, - response.revised_max_keep_alive_count, - max_notifications_per_publish, - priority, - ); - session_debug!(self, "modify_subscription success for {}", subscription_id); - Ok(()) - } else { - session_error!(self, "modify_subscription failed {:?}", response); - Err(process_unexpected_response(response)) - } - } - } - - fn set_publishing_mode( - &self, - subscription_ids: &[u32], - publishing_enabled: bool, - ) -> Result, StatusCode> { - session_debug!( - self, - "set_publishing_mode, for subscriptions {:?}, publishing enabled {}", - subscription_ids, - publishing_enabled - ); - if subscription_ids.is_empty() { - // No subscriptions - session_error!( - self, - "set_publishing_mode, no subscription ids were provided" - ); - Err(StatusCode::BadNothingToDo) - } else { - let request = SetPublishingModeRequest { - request_header: self.make_request_header(), - publishing_enabled, - subscription_ids: Some(subscription_ids.to_vec()), - }; - let response = self.send_request(request)?; - if let 
SupportedMessage::SetPublishingModeResponse(response) = response { - process_service_result(&response.response_header)?; - { - // Clear out all subscriptions, assuming the delete worked - let mut subscription_state = trace_write_lock!(self.subscription_state); - subscription_state.set_publishing_mode(subscription_ids, publishing_enabled); - } - session_debug!(self, "set_publishing_mode success"); - Ok(response.results.unwrap()) - } else { - session_error!(self, "set_publishing_mode failed {:?}", response); - Err(process_unexpected_response(response)) - } - } - } - - fn transfer_subscriptions( - &self, - subscription_ids: &[u32], - send_initial_values: bool, - ) -> Result, StatusCode> { - if subscription_ids.is_empty() { - // No subscriptions - session_error!( - self, - "set_publishing_mode, no subscription ids were provided" - ); - Err(StatusCode::BadNothingToDo) - } else { - let request = TransferSubscriptionsRequest { - request_header: self.make_request_header(), - subscription_ids: Some(subscription_ids.to_vec()), - send_initial_values, - }; - let response = self.send_request(request)?; - if let SupportedMessage::TransferSubscriptionsResponse(response) = response { - process_service_result(&response.response_header)?; - session_debug!(self, "transfer_subscriptions success"); - Ok(response.results.unwrap()) - } else { - session_error!(self, "transfer_subscriptions failed {:?}", response); - Err(process_unexpected_response(response)) - } - } - } - - fn delete_subscription(&self, subscription_id: u32) -> Result { - if subscription_id == 0 { - session_error!(self, "delete_subscription, subscription id 0 is invalid"); - Err(StatusCode::BadInvalidArgument) - } else if !self.subscription_exists(subscription_id) { - session_error!( - self, - "delete_subscription, subscription id {} does not exist", - subscription_id - ); - Err(StatusCode::BadInvalidArgument) - } else { - let result = self.delete_subscriptions(&[subscription_id][..])?; - Ok(result[0]) - } - } - - fn 
delete_subscriptions( - &self, - subscription_ids: &[u32], - ) -> Result, StatusCode> { - if subscription_ids.is_empty() { - // No subscriptions - session_trace!(self, "delete_subscriptions with no subscriptions"); - Err(StatusCode::BadNothingToDo) - } else { - // Send a delete request holding all the subscription ides that we wish to delete - let request = DeleteSubscriptionsRequest { - request_header: self.make_request_header(), - subscription_ids: Some(subscription_ids.to_vec()), - }; - let response = self.send_request(request)?; - if let SupportedMessage::DeleteSubscriptionsResponse(response) = response { - process_service_result(&response.response_header)?; - { - // Clear out deleted subscriptions, assuming the delete worked - let mut subscription_state = trace_write_lock!(self.subscription_state); - subscription_ids.iter().for_each(|id| { - let _ = subscription_state.delete_subscription(*id); - }); - } - session_debug!(self, "delete_subscriptions success"); - Ok(response.results.unwrap()) - } else { - session_error!(self, "delete_subscriptions failed {:?}", response); - Err(process_unexpected_response(response)) - } - } - } -} - -impl NodeManagementService for Session { - fn add_nodes(&self, nodes_to_add: &[AddNodesItem]) -> Result, StatusCode> { - if nodes_to_add.is_empty() { - session_error!(self, "add_nodes, called with no nodes to add"); - Err(StatusCode::BadNothingToDo) - } else { - let request = AddNodesRequest { - request_header: self.make_request_header(), - nodes_to_add: Some(nodes_to_add.to_vec()), - }; - let response = self.send_request(request)?; - if let SupportedMessage::AddNodesResponse(response) = response { - Ok(response.results.unwrap()) - } else { - Err(process_unexpected_response(response)) - } - } - } - - fn add_references( - &self, - references_to_add: &[AddReferencesItem], - ) -> Result, StatusCode> { - if references_to_add.is_empty() { - session_error!(self, "add_references, called with no references to add"); - 
Err(StatusCode::BadNothingToDo) - } else { - let request = AddReferencesRequest { - request_header: self.make_request_header(), - references_to_add: Some(references_to_add.to_vec()), - }; - let response = self.send_request(request)?; - if let SupportedMessage::AddReferencesResponse(response) = response { - Ok(response.results.unwrap()) - } else { - Err(process_unexpected_response(response)) - } - } - } - - fn delete_nodes( - &self, - nodes_to_delete: &[DeleteNodesItem], - ) -> Result, StatusCode> { - if nodes_to_delete.is_empty() { - session_error!(self, "delete_nodes, called with no nodes to delete"); - Err(StatusCode::BadNothingToDo) - } else { - let request = DeleteNodesRequest { - request_header: self.make_request_header(), - nodes_to_delete: Some(nodes_to_delete.to_vec()), - }; - let response = self.send_request(request)?; - if let SupportedMessage::DeleteNodesResponse(response) = response { - Ok(response.results.unwrap()) - } else { - Err(process_unexpected_response(response)) - } - } - } - - fn delete_references( - &self, - references_to_delete: &[DeleteReferencesItem], - ) -> Result, StatusCode> { - if references_to_delete.is_empty() { - session_error!( - self, - "delete_references, called with no references to delete" - ); - Err(StatusCode::BadNothingToDo) - } else { - let request = DeleteReferencesRequest { - request_header: self.make_request_header(), - references_to_delete: Some(references_to_delete.to_vec()), - }; - let response = self.send_request(request)?; - if let SupportedMessage::DeleteReferencesResponse(response) = response { - Ok(response.results.unwrap()) - } else { - Err(process_unexpected_response(response)) - } - } - } -} - -impl MonitoredItemService for Session { - fn create_monitored_items( - &self, - subscription_id: u32, - timestamps_to_return: TimestampsToReturn, - items_to_create: &[MonitoredItemCreateRequest], - ) -> Result, StatusCode> { - session_debug!( - self, - "create_monitored_items, for subscription {}, {} items", - 
subscription_id, - items_to_create.len() - ); - if subscription_id == 0 { - session_error!(self, "create_monitored_items, subscription id 0 is invalid"); - Err(StatusCode::BadInvalidArgument) - } else if !self.subscription_exists(subscription_id) { - session_error!( - self, - "create_monitored_items, subscription id {} does not exist", - subscription_id - ); - Err(StatusCode::BadInvalidArgument) - } else if items_to_create.is_empty() { - session_error!( - self, - "create_monitored_items, called with no items to create" - ); - Err(StatusCode::BadNothingToDo) - } else { - // Assign each item a unique client handle - let mut items_to_create = items_to_create.to_vec(); - { - let mut session_state = trace_write_lock!(self.session_state); - items_to_create.iter_mut().for_each(|i| { - //if user doesn't specify a valid client_handle - if i.requested_parameters.client_handle == 0 { - i.requested_parameters.client_handle = - session_state.next_monitored_item_handle(); - } - }); - } - - let request = CreateMonitoredItemsRequest { - request_header: self.make_request_header(), - subscription_id, - timestamps_to_return, - items_to_create: Some(items_to_create.clone()), - }; - let response = self.send_request(request)?; - if let SupportedMessage::CreateMonitoredItemsResponse(response) = response { - process_service_result(&response.response_header)?; - if let Some(ref results) = response.results { - session_debug!( - self, - "create_monitored_items, {} items created", - items_to_create.len() - ); - // Set the items in our internal state - let items_to_create = items_to_create - .iter() - .zip(results) - .map(|(i, r)| subscription::CreateMonitoredItem { - id: r.monitored_item_id, - client_handle: i.requested_parameters.client_handle, - discard_oldest: i.requested_parameters.discard_oldest, - item_to_monitor: i.item_to_monitor.clone(), - monitoring_mode: i.monitoring_mode, - queue_size: r.revised_queue_size, - sampling_interval: r.revised_sampling_interval, - }) - .collect::>(); - 
{ - let mut subscription_state = trace_write_lock!(self.subscription_state); - subscription_state - .insert_monitored_items(subscription_id, &items_to_create); - } - } else { - session_debug!( - self, - "create_monitored_items, success but no monitored items were created" - ); - } - Ok(response.results.unwrap()) - } else { - session_error!(self, "create_monitored_items failed {:?}", response); - Err(process_unexpected_response(response)) - } - } - } - - fn modify_monitored_items( - &self, - subscription_id: u32, - timestamps_to_return: TimestampsToReturn, - items_to_modify: &[MonitoredItemModifyRequest], - ) -> Result, StatusCode> { - session_debug!( - self, - "modify_monitored_items, for subscription {}, {} items", - subscription_id, - items_to_modify.len() - ); - if subscription_id == 0 { - session_error!(self, "modify_monitored_items, subscription id 0 is invalid"); - Err(StatusCode::BadInvalidArgument) - } else if !self.subscription_exists(subscription_id) { - session_error!( - self, - "modify_monitored_items, subscription id {} does not exist", - subscription_id - ); - Err(StatusCode::BadInvalidArgument) - } else if items_to_modify.is_empty() { - session_error!( - self, - "modify_monitored_items, called with no items to modify" - ); - Err(StatusCode::BadNothingToDo) - } else { - let monitored_item_ids = items_to_modify - .iter() - .map(|i| i.monitored_item_id) - .collect::>(); - let request = ModifyMonitoredItemsRequest { - request_header: self.make_request_header(), - subscription_id, - timestamps_to_return, - items_to_modify: Some(items_to_modify.to_vec()), - }; - let response = self.send_request(request)?; - if let SupportedMessage::ModifyMonitoredItemsResponse(response) = response { - process_service_result(&response.response_header)?; - if let Some(ref results) = response.results { - // Set the items in our internal state - let items_to_modify = monitored_item_ids - .iter() - .zip(results.iter()) - .map(|(id, r)| subscription::ModifyMonitoredItem { - id: 
*id, - queue_size: r.revised_queue_size, - sampling_interval: r.revised_sampling_interval, - }) - .collect::>(); - { - let mut subscription_state = trace_write_lock!(self.subscription_state); - subscription_state - .modify_monitored_items(subscription_id, &items_to_modify); - } - } - session_debug!(self, "modify_monitored_items, success"); - Ok(response.results.unwrap()) - } else { - session_error!(self, "modify_monitored_items failed {:?}", response); - Err(process_unexpected_response(response)) - } - } - } - - fn set_monitoring_mode( - &self, - subscription_id: u32, - monitoring_mode: MonitoringMode, - monitored_item_ids: &[u32], - ) -> Result, StatusCode> { - if monitored_item_ids.is_empty() { - session_error!(self, "set_monitoring_mode, called with nothing to do"); - Err(StatusCode::BadNothingToDo) - } else { - let request = { - let monitored_item_ids = Some(monitored_item_ids.to_vec()); - SetMonitoringModeRequest { - request_header: self.make_request_header(), - subscription_id, - monitoring_mode, - monitored_item_ids, - } - }; - let response = self.send_request(request)?; - if let SupportedMessage::SetMonitoringModeResponse(response) = response { - Ok(response.results.unwrap()) - } else { - session_error!(self, "set_monitoring_mode failed {:?}", response); - Err(process_unexpected_response(response)) - } - } - } - - fn set_triggering( - &self, - subscription_id: u32, - triggering_item_id: u32, - links_to_add: &[u32], - links_to_remove: &[u32], - ) -> Result<(Option>, Option>), StatusCode> { - if links_to_add.is_empty() && links_to_remove.is_empty() { - session_error!(self, "set_triggering, called with nothing to add or remove"); - Err(StatusCode::BadNothingToDo) - } else { - let request = { - let links_to_add = if links_to_add.is_empty() { - None - } else { - Some(links_to_add.to_vec()) - }; - let links_to_remove = if links_to_remove.is_empty() { - None - } else { - Some(links_to_remove.to_vec()) - }; - SetTriggeringRequest { - request_header: 
self.make_request_header(), - subscription_id, - triggering_item_id, - links_to_add, - links_to_remove, - } - }; - let response = self.send_request(request)?; - if let SupportedMessage::SetTriggeringResponse(response) = response { - // Update client side state - let mut subscription_state = trace_write_lock!(self.subscription_state); - subscription_state.set_triggering( - subscription_id, - triggering_item_id, - links_to_add, - links_to_remove, - ); - Ok((response.add_results, response.remove_results)) - } else { - session_error!(self, "set_triggering failed {:?}", response); - Err(process_unexpected_response(response)) - } - } - } - - fn delete_monitored_items( - &self, - subscription_id: u32, - items_to_delete: &[u32], - ) -> Result, StatusCode> { - session_debug!( - self, - "delete_monitored_items, subscription {} for {} items", - subscription_id, - items_to_delete.len() - ); - if subscription_id == 0 { - session_error!(self, "delete_monitored_items, subscription id 0 is invalid"); - Err(StatusCode::BadInvalidArgument) - } else if !self.subscription_exists(subscription_id) { - session_error!( - self, - "delete_monitored_items, subscription id {} does not exist", - subscription_id - ); - Err(StatusCode::BadInvalidArgument) - } else if items_to_delete.is_empty() { - session_error!( - self, - "delete_monitored_items, called with no items to delete" - ); - Err(StatusCode::BadNothingToDo) - } else { - let request = DeleteMonitoredItemsRequest { - request_header: self.make_request_header(), - subscription_id, - monitored_item_ids: Some(items_to_delete.to_vec()), - }; - let response = self.send_request(request)?; - if let SupportedMessage::DeleteMonitoredItemsResponse(response) = response { - process_service_result(&response.response_header)?; - if response.results.is_some() { - let mut subscription_state = trace_write_lock!(self.subscription_state); - subscription_state.delete_monitored_items(subscription_id, items_to_delete); - } - session_debug!(self, 
"delete_monitored_items, success"); - Ok(response.results.unwrap()) - } else { - session_error!(self, "delete_monitored_items failed {:?}", response); - Err(process_unexpected_response(response)) - } - } - } -} - -impl ViewService for Session { - fn browse( - &self, - nodes_to_browse: &[BrowseDescription], - ) -> Result>, StatusCode> { - if nodes_to_browse.is_empty() { - session_error!(self, "browse, was not supplied with any nodes to browse"); - Err(StatusCode::BadNothingToDo) - } else { - let request = BrowseRequest { - request_header: self.make_request_header(), - view: ViewDescription { - view_id: NodeId::null(), - timestamp: DateTime::null(), - view_version: 0, - }, - requested_max_references_per_node: 1000, - nodes_to_browse: Some(nodes_to_browse.to_vec()), - }; - let response = self.send_request(request)?; - if let SupportedMessage::BrowseResponse(response) = response { - session_debug!(self, "browse, success"); - process_service_result(&response.response_header)?; - Ok(response.results) - } else { - session_error!(self, "browse failed {:?}", response); - Err(process_unexpected_response(response)) - } - } - } - - fn browse_next( - &self, - release_continuation_points: bool, - continuation_points: &[ByteString], - ) -> Result>, StatusCode> { - if continuation_points.is_empty() { - Err(StatusCode::BadNothingToDo) - } else { - let request = BrowseNextRequest { - request_header: self.make_request_header(), - continuation_points: Some(continuation_points.to_vec()), - release_continuation_points, - }; - let response = self.send_request(request)?; - if let SupportedMessage::BrowseNextResponse(response) = response { - session_debug!(self, "browse_next, success"); - process_service_result(&response.response_header)?; - Ok(response.results) - } else { - session_error!(self, "browse_next failed {:?}", response); - Err(process_unexpected_response(response)) - } - } - } - - fn translate_browse_paths_to_node_ids( - &self, - browse_paths: &[BrowsePath], - ) -> Result, 
StatusCode> { - if browse_paths.is_empty() { - session_error!( - self, - "translate_browse_paths_to_node_ids, was not supplied with any browse paths" - ); - Err(StatusCode::BadNothingToDo) - } else { - let request = TranslateBrowsePathsToNodeIdsRequest { - request_header: self.make_request_header(), - browse_paths: Some(browse_paths.to_vec()), - }; - let response = self.send_request(request)?; - if let SupportedMessage::TranslateBrowsePathsToNodeIdsResponse(response) = response { - session_debug!(self, "translate_browse_paths_to_node_ids, success"); - process_service_result(&response.response_header)?; - Ok(response.results.unwrap_or_default()) - } else { - session_error!( - self, - "translate_browse_paths_to_node_ids failed {:?}", - response - ); - Err(process_unexpected_response(response)) - } - } - } - - fn register_nodes(&self, nodes_to_register: &[NodeId]) -> Result, StatusCode> { - if nodes_to_register.is_empty() { - session_error!( - self, - "register_nodes, was not supplied with any nodes to register" - ); - Err(StatusCode::BadNothingToDo) - } else { - let request = RegisterNodesRequest { - request_header: self.make_request_header(), - nodes_to_register: Some(nodes_to_register.to_vec()), - }; - let response = self.send_request(request)?; - if let SupportedMessage::RegisterNodesResponse(response) = response { - session_debug!(self, "register_nodes, success"); - process_service_result(&response.response_header)?; - Ok(response.registered_node_ids.unwrap()) - } else { - session_error!(self, "register_nodes failed {:?}", response); - Err(process_unexpected_response(response)) - } - } - } - - fn unregister_nodes(&self, nodes_to_unregister: &[NodeId]) -> Result<(), StatusCode> { - if nodes_to_unregister.is_empty() { - session_error!( - self, - "unregister_nodes, was not supplied with any nodes to unregister" - ); - Err(StatusCode::BadNothingToDo) - } else { - let request = UnregisterNodesRequest { - request_header: self.make_request_header(), - 
nodes_to_unregister: Some(nodes_to_unregister.to_vec()), - }; - let response = self.send_request(request)?; - if let SupportedMessage::UnregisterNodesResponse(response) = response { - session_debug!(self, "unregister_nodes, success"); - process_service_result(&response.response_header)?; - Ok(()) - } else { - session_error!(self, "unregister_nodes failed {:?}", response); - Err(process_unexpected_response(response)) - } - } - } -} - -impl MethodService for Session { - fn call(&self, method: T) -> Result - where - T: Into, - { - session_debug!(self, "call()"); - let methods_to_call = Some(vec![method.into()]); - let request = CallRequest { - request_header: self.make_request_header(), - methods_to_call, - }; - let response = self.send_request(request)?; - if let SupportedMessage::CallResponse(response) = response { - if let Some(mut results) = response.results { - if results.len() != 1 { - session_error!( - self, - "call(), expecting a result from the call to the server, got {} results", - results.len() - ); - Err(StatusCode::BadUnexpectedError) - } else { - Ok(results.remove(0)) - } - } else { - session_error!( - self, - "call(), expecting a result from the call to the server, got nothing" - ); - Err(StatusCode::BadUnexpectedError) - } - } else { - Err(process_unexpected_response(response)) - } - } -} - -impl AttributeService for Session { - fn read( - &self, - nodes_to_read: &[ReadValueId], - timestamps_to_return: TimestampsToReturn, - max_age: f64, - ) -> Result, StatusCode> { - if nodes_to_read.is_empty() { - // No subscriptions - session_error!(self, "read(), was not supplied with any nodes to read"); - Err(StatusCode::BadNothingToDo) - } else { - session_debug!(self, "read() requested to read nodes {:?}", nodes_to_read); - let request = ReadRequest { - request_header: self.make_request_header(), - max_age, - timestamps_to_return, - nodes_to_read: Some(nodes_to_read.to_vec()), - }; - let response = self.send_request(request)?; - if let 
SupportedMessage::ReadResponse(response) = response { - session_debug!(self, "read(), success"); - process_service_result(&response.response_header)?; - let results = if let Some(results) = response.results { - results - } else { - Vec::new() - }; - Ok(results) - } else { - session_error!(self, "read() value failed"); - Err(process_unexpected_response(response)) - } - } - } - - fn history_read( - &self, - history_read_details: HistoryReadAction, - timestamps_to_return: TimestampsToReturn, - release_continuation_points: bool, - nodes_to_read: &[HistoryReadValueId], - ) -> Result, StatusCode> { - // Turn the enum into an extension object - let history_read_details = ExtensionObject::from(history_read_details); - let request = HistoryReadRequest { - request_header: self.make_request_header(), - history_read_details, - timestamps_to_return, - release_continuation_points, - nodes_to_read: if nodes_to_read.is_empty() { - None - } else { - Some(nodes_to_read.to_vec()) - }, - }; - session_debug!( - self, - "history_read() requested to read nodes {:?}", - nodes_to_read - ); - let response = self.send_request(request)?; - if let SupportedMessage::HistoryReadResponse(response) = response { - session_debug!(self, "history_read(), success"); - process_service_result(&response.response_header)?; - let results = if let Some(results) = response.results { - results - } else { - Vec::new() - }; - Ok(results) - } else { - session_error!(self, "history_read() value failed"); - Err(process_unexpected_response(response)) - } - } - - fn write(&self, nodes_to_write: &[WriteValue]) -> Result, StatusCode> { - if nodes_to_write.is_empty() { - // No subscriptions - session_error!(self, "write() was not supplied with any nodes to write"); - Err(StatusCode::BadNothingToDo) - } else { - let request = WriteRequest { - request_header: self.make_request_header(), - nodes_to_write: Some(nodes_to_write.to_vec()), - }; - let response = self.send_request(request)?; - if let 
SupportedMessage::WriteResponse(response) = response { - session_debug!(self, "write(), success"); - process_service_result(&response.response_header)?; - Ok(response.results.unwrap_or_default()) - } else { - session_error!(self, "write() failed {:?}", response); - Err(process_unexpected_response(response)) - } - } - } - - fn history_update( - &self, - history_update_details: &[HistoryUpdateAction], - ) -> Result, StatusCode> { - if history_update_details.is_empty() { - // No subscriptions - session_error!( - self, - "history_update(), was not supplied with any detail to update" - ); - Err(StatusCode::BadNothingToDo) - } else { - // Turn the enums into ExtensionObjects - let history_update_details = history_update_details - .iter() - .map(|action| ExtensionObject::from(action)) - .collect::>(); - - let request = HistoryUpdateRequest { - request_header: self.make_request_header(), - history_update_details: Some(history_update_details.to_vec()), - }; - let response = self.send_request(request)?; - if let SupportedMessage::HistoryUpdateResponse(response) = response { - session_debug!(self, "history_update(), success"); - process_service_result(&response.response_header)?; - let results = if let Some(results) = response.results { - results - } else { - Vec::new() - }; - Ok(results) - } else { - session_error!(self, "history_update() failed {:?}", response); - Err(process_unexpected_response(response)) - } - } - } -} diff --git a/lib/src/client/session/session_state.rs b/lib/src/client/session/session_state.rs deleted file mode 100644 index 89acd8c7c..000000000 --- a/lib/src/client/session/session_state.rs +++ /dev/null @@ -1,602 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use std::{ - sync::{ - atomic::{AtomicU32, Ordering}, - mpsc::{self, Receiver, SyncSender}, - Arc, - }, - u32, -}; - -use chrono::Duration; -use tokio::time::Instant; - -use crate::{ - client::{ - callbacks::{OnConnectionStatusChange, 
OnSessionClosed}, - message_queue::MessageQueue, - process_unexpected_response, - session::{session_debug, session_trace}, - subscription_state::SubscriptionState, - }, - core::{ - comms::secure_channel::SecureChannel, handle::Handle, supported_message::SupportedMessage, - }, - crypto::SecurityPolicy, - sync::*, - types::{status_code::StatusCode, *}, -}; - -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum ConnectionState { - /// No connect has been made yet - NotStarted, - /// Connecting - Connecting, - /// Connection success - Connected, - // Waiting for ACK from the server - WaitingForAck, - // Connection is running - Processing, - // Connection is finished, possibly after an error - Finished(StatusCode), -} - -#[derive(Clone)] -/// A manager for the connection status with some helpers for common actions. -pub(crate) struct ConnectionStateMgr { - state: Arc>, -} - -impl ConnectionStateMgr { - pub fn new() -> Self { - Self { - state: Arc::new(RwLock::new(ConnectionState::NotStarted)), - } - } - - pub fn state(&self) -> ConnectionState { - let connection_state = trace_read_lock!(self.state); - *connection_state - } - - pub fn set_state(&self, state: ConnectionState) { - trace!("setting connection state to {:?}", state); - let mut connection_state = trace_write_lock!(self.state); - *connection_state = state; - } - - pub fn set_finished(&self, finished_code: StatusCode) { - self.set_state(ConnectionState::Finished(finished_code)); - } - - pub fn is_connected(&self) -> bool { - !matches!( - self.state(), - ConnectionState::NotStarted - | ConnectionState::Connecting - | ConnectionState::Finished(_) - ) - } - - pub fn is_finished(&self) -> bool { - matches!(self.state(), ConnectionState::Finished(_)) - } -} - -lazy_static! { - static ref NEXT_SESSION_ID: AtomicU32 = AtomicU32::new(1); -} - -/// Session's state indicates connection status, negotiated times and sizes, -/// and security tokens. 
-pub(crate) struct SessionState { - /// A unique identifier for the session, this is NOT the session id assigned after a session is created - id: u32, - /// Time offset between the client and the server. - client_offset: Duration, - /// Ignore clock skew between the client and the server. - ignore_clock_skew: bool, - /// Secure channel information - secure_channel: Arc>, - /// Connection state - what the session's connection is currently doing - connection_state: ConnectionStateMgr, - /// The request timeout is how long the session will wait from sending a request expecting a response - /// if no response is received the client will terminate. - request_timeout: u32, - /// Size of the send buffer - send_buffer_size: usize, - /// Size of the - receive_buffer_size: usize, - /// Maximum message size - max_message_size: usize, - /// Maximum chunk size - max_chunk_count: usize, - /// The session's id assigned after a connection and used for diagnostic info - session_id: NodeId, - /// The session authentication token, used for session activation - authentication_token: NodeId, - /// The next handle to assign to a request - request_handle: Handle, - /// Next monitored item client side handle - monitored_item_handle: Handle, - /// Subscription acknowledgements pending for send - subscription_acknowledgements: Vec, - /// Subscription state - subscription_state: Arc>, - /// Connection closed callback - session_closed_callback: Option>, - /// Connection status callback - connection_status_callback: Option>, - /// Message queue. 
- pub(crate) message_queue: Arc>, -} - -impl OnSessionClosed for SessionState { - fn on_session_closed(&mut self, status_code: StatusCode) { - debug!("Session was closed with status = {}", status_code); - if let Some(ref mut session_closed_callback) = self.session_closed_callback { - session_closed_callback.on_session_closed(status_code); - } - } -} - -impl Drop for SessionState { - fn drop(&mut self) { - info!("SessionState has dropped"); - } -} - -impl SessionState { - const FIRST_REQUEST_HANDLE: u32 = 1; - const FIRST_MONITORED_ITEM_HANDLE: u32 = 1000; - - const DEFAULT_REQUEST_TIMEOUT: u32 = 10 * 1000; - const SEND_BUFFER_SIZE: usize = 65535; - const RECEIVE_BUFFER_SIZE: usize = 65535; - const MAX_BUFFER_SIZE: usize = 65535; - - pub fn new( - ignore_clock_skew: bool, - secure_channel: Arc>, - subscription_state: Arc>, - ) -> SessionState { - let id = NEXT_SESSION_ID.fetch_add(1, Ordering::Relaxed); - SessionState { - id, - client_offset: Duration::zero(), - ignore_clock_skew, - secure_channel, - connection_state: ConnectionStateMgr::new(), - request_timeout: Self::DEFAULT_REQUEST_TIMEOUT, - send_buffer_size: Self::SEND_BUFFER_SIZE, - receive_buffer_size: Self::RECEIVE_BUFFER_SIZE, - max_message_size: Self::MAX_BUFFER_SIZE, - max_chunk_count: constants::MAX_CHUNK_COUNT, - request_handle: Handle::new(Self::FIRST_REQUEST_HANDLE), - session_id: NodeId::null(), - authentication_token: NodeId::null(), - monitored_item_handle: Handle::new(Self::FIRST_MONITORED_ITEM_HANDLE), - subscription_acknowledgements: Vec::new(), - subscription_state, - session_closed_callback: None, - connection_status_callback: None, - message_queue: Arc::new(RwLock::new(MessageQueue::new())), - } - } - - pub fn id(&self) -> u32 { - self.id - } - - pub fn set_client_offset(&mut self, offset: Duration) { - self.client_offset = self.client_offset + offset; - debug!("Client offset set to {}", self.client_offset); - } - - pub fn set_session_id(&mut self, session_id: NodeId) { - self.session_id = 
session_id - } - - pub fn session_id(&self) -> NodeId { - self.session_id.clone() - } - - pub fn receive_buffer_size(&self) -> usize { - self.receive_buffer_size - } - - pub fn max_message_size(&self) -> usize { - self.max_message_size - } - - pub fn max_chunk_count(&self) -> usize { - self.max_chunk_count - } - - pub fn request_timeout(&self) -> u32 { - self.request_timeout - } - - pub fn send_buffer_size(&self) -> usize { - self.send_buffer_size - } - - pub fn add_subscription_acknowledgement( - &mut self, - subscription_acknowledgement: SubscriptionAcknowledgement, - ) { - self.subscription_acknowledgements - .push(subscription_acknowledgement); - } - - pub fn set_authentication_token(&mut self, authentication_token: NodeId) { - self.authentication_token = authentication_token; - } - - pub fn set_session_closed_callback(&mut self, session_closed_callback: CB) - where - CB: OnSessionClosed + Send + Sync + 'static, - { - self.session_closed_callback = Some(Box::new(session_closed_callback)); - } - - pub fn set_connection_status_callback(&mut self, connection_status_callback: CB) - where - CB: OnConnectionStatusChange + Send + Sync + 'static, - { - self.connection_status_callback = Some(Box::new(connection_status_callback)); - } - - pub(crate) fn on_connection_status_change(&mut self, connected: bool) { - if let Some(ref mut connection_status) = self.connection_status_callback { - connection_status.on_connection_status_change(connected); - } - } - - pub(crate) fn connection_state(&self) -> ConnectionStateMgr { - self.connection_state.clone() - } - - /// Construct a request header for the session. All requests after create session are expected - /// to supply an authentication token. 
- pub fn make_request_header(&mut self) -> RequestHeader { - RequestHeader { - authentication_token: self.authentication_token.clone(), - timestamp: DateTime::now_with_offset(self.client_offset), - request_handle: self.request_handle.next(), - return_diagnostics: DiagnosticBits::empty(), - timeout_hint: self.request_timeout, - ..Default::default() - } - } - - /// Sends a publish request containing acknowledgements for previous notifications. - pub fn async_publish(&mut self) -> Result { - let subscription_acknowledgements = if self.subscription_acknowledgements.is_empty() { - None - } else { - let subscription_acknowledgements: Vec = - self.subscription_acknowledgements.drain(..).collect(); - // Debug sequence nrs - if log_enabled!(log::Level::Debug) { - let sequence_nrs: Vec = subscription_acknowledgements - .iter() - .map(|ack| ack.sequence_number) - .collect(); - debug!( - "async_publish is acknowledging subscription acknowledgements with sequence nrs {:?}", - sequence_nrs - ); - } - Some(subscription_acknowledgements) - }; - let request = PublishRequest { - request_header: self.make_request_header(), - subscription_acknowledgements, - }; - let request_handle = self.async_send_request(request, None)?; - - { - let mut subscription_state = trace_write_lock!(self.subscription_state); - subscription_state.set_last_publish_request(Instant::now()); - } - - debug!("async_publish, request sent with handle {}", request_handle); - Ok(request_handle) - } - - /// Synchronously sends a request. 
The return value is the response to the request - pub(crate) fn send_request(&mut self, request: T) -> Result - where - T: Into, - { - // A channel is created to receive the response - let (sender, receiver) = mpsc::sync_channel(1); - // Send the request - let request_handle = self.async_send_request(request, Some(sender))?; - // Wait for the response - let request_timeout = self.request_timeout(); - self.wait_for_sync_response(request_handle, request_timeout, receiver) - } - - pub(crate) fn reset(&mut self) { - // Clear tokens, ids etc. - self.session_id = NodeId::null(); - self.authentication_token = NodeId::null(); - self.request_handle.reset(); - self.monitored_item_handle.reset(); - - // Clear the message queue - { - let mut message_queue = trace_write_lock!(self.message_queue); - message_queue.clear(); - }; - } - - /// Asynchronously sends a request. The return value is the request handle of the request - pub(crate) fn async_send_request( - &mut self, - request: T, - sender: Option>, - ) -> Result - where - T: Into, - { - let request = request.into(); - match request { - SupportedMessage::OpenSecureChannelRequest(_) - | SupportedMessage::CloseSecureChannelRequest(_) => {} - _ => { - // Make sure secure channel token hasn't expired - let _ = self.ensure_secure_channel_token(); - } - } - - // TODO should error here if not connected - - // Enqueue the request - let request_handle = request.request_handle(); - self.add_request(request, sender); - - Ok(request_handle) - } - - pub(crate) fn quit(&self) { - let message_queue = trace_read_lock!(self.message_queue); - message_queue.quit(); - } - - /// Wait for a response with a matching request handle. If request handle is 0 then no match - /// is performed and in fact the function is expected to receive no messages except asynchronous - /// and housekeeping events from the server. A 0 handle will cause the wait to process at most - /// one async message before returning. 
- fn wait_for_sync_response( - &mut self, - request_handle: u32, - request_timeout: u32, - receiver: Receiver, - ) -> Result { - if request_handle == 0 { - panic!("Request handle must be non zero"); - } - // Receive messages until the one expected comes back. Publish responses will be consumed - // silently. - let request_timeout = std::time::Duration::from_millis(request_timeout as u64); - receiver.recv_timeout(request_timeout).map_err(|_| { - info!("Timeout waiting for response from server"); - self.request_has_timed_out(request_handle); - StatusCode::BadTimeout - }) - } - - fn request_has_timed_out(&self, request_handle: u32) { - let mut message_queue = trace_write_lock!(self.message_queue); - message_queue.request_has_timed_out(request_handle) - } - - fn add_request( - &mut self, - request: SupportedMessage, - sender: Option>, - ) { - let mut message_queue = trace_write_lock!(self.message_queue); - message_queue.add_request(request, sender) - } - - /// Checks if secure channel token needs to be renewed and renews it - fn ensure_secure_channel_token(&mut self) -> Result<(), StatusCode> { - let should_renew_security_token = { - let secure_channel = trace_read_lock!(self.secure_channel); - secure_channel.should_renew_security_token() - }; - if should_renew_security_token { - self.issue_or_renew_secure_channel(SecurityTokenRequestType::Renew) - } else { - Ok(()) - } - } - - pub(crate) fn issue_or_renew_secure_channel( - &mut self, - request_type: SecurityTokenRequestType, - ) -> Result<(), StatusCode> { - trace!("issue_or_renew_secure_channel({:?})", request_type); - - const REQUESTED_LIFETIME: u32 = 60000; // TODO - - let (security_mode, security_policy, client_nonce) = { - let mut secure_channel = trace_write_lock!(self.secure_channel); - let client_nonce = secure_channel.security_policy().random_nonce(); - secure_channel.set_local_nonce(client_nonce.as_ref()); - ( - secure_channel.security_mode(), - secure_channel.security_policy(), - client_nonce, - ) - }; - - 
info!("Making secure channel request"); - info!("security_mode = {:?}", security_mode); - info!("security_policy = {:?}", security_policy); - - let requested_lifetime = REQUESTED_LIFETIME; - let request = OpenSecureChannelRequest { - request_header: self.make_request_header(), - client_protocol_version: 0, - request_type, - security_mode, - client_nonce, - requested_lifetime, - }; - let response = self.send_request(request)?; - if let SupportedMessage::OpenSecureChannelResponse(response) = response { - // Extract the security token from the response. - let mut security_token = response.security_token.clone(); - - // When ignoring clock skew, we calculate the time offset between the client and the - // server and use that offset to compensate for the difference in time when setting - // the timestamps in the request headers and when decoding timestamps in messages - // received from the server. - if self.ignore_clock_skew && !response.response_header.timestamp.is_null() { - let offset = response.response_header.timestamp - DateTime::now(); - // Make sure to apply the offset to the security token in the current response. - security_token.created_at = security_token.created_at - offset; - // Update the client offset by adding the new offset. When the secure channel is - // renewed its already using the client offset calculated when issuing the secure - // channel and only needs to be updated to accommodate any additional clock skew. 
- self.set_client_offset(offset); - } - - debug!("Setting transport's security token"); - { - let mut secure_channel = trace_write_lock!(self.secure_channel); - secure_channel.set_client_offset(self.client_offset); - secure_channel.set_security_token(security_token); - - if security_policy != SecurityPolicy::None - && (security_mode == MessageSecurityMode::Sign - || security_mode == MessageSecurityMode::SignAndEncrypt) - { - secure_channel.set_remote_nonce_from_byte_string(&response.server_nonce)?; - secure_channel.derive_keys(); - } - } - Ok(()) - } else { - Err(process_unexpected_response(response)) - } - } - - // Process any async messages we expect to receive - pub(crate) fn handle_publish_responses(&mut self) -> bool { - let responses = { - let mut message_queue = trace_write_lock!(self.message_queue); - message_queue.async_responses() - }; - if responses.is_empty() { - false - } else { - session_debug!(self, "Processing {} async messages", responses.len()); - for response in responses { - self.handle_async_response(response); - } - true - } - } - - /// This is the handler for asynchronous responses which are currently assumed to be publish - /// responses. It maintains the acknowledgements to be sent and sends the data change - /// notifications to the client for processing. 
- fn handle_async_response(&mut self, response: SupportedMessage) { - session_debug!(self, "handle_async_response"); - match response { - SupportedMessage::PublishResponse(response) => { - session_debug!(self, "PublishResponse"); - - // Update subscriptions based on response - // Queue acknowledgements for next request - let notification_message = response.notification_message.clone(); - let subscription_id = response.subscription_id; - - // Queue an acknowledgement for this request (if it has data) - if let Some(ref notification_data) = notification_message.notification_data { - if !notification_data.is_empty() { - self.add_subscription_acknowledgement(SubscriptionAcknowledgement { - subscription_id, - sequence_number: notification_message.sequence_number, - }); - } - } - - let decoding_options = { - let secure_channel = trace_read_lock!(self.secure_channel); - secure_channel.decoding_options() - }; - - // Process data change notifications - if let Some((data_change_notifications, events)) = - notification_message.notifications(&decoding_options) - { - session_debug!( - self, - "Received notifications, data changes = {}, events = {}", - data_change_notifications.len(), - events.len() - ); - if !data_change_notifications.is_empty() { - let mut subscription_state = trace_write_lock!(self.subscription_state); - subscription_state - .on_data_change(subscription_id, &data_change_notifications); - } - if !events.is_empty() { - let mut subscription_state = trace_write_lock!(self.subscription_state); - subscription_state.on_event(subscription_id, &events); - } - } - - // Send another publish request - let _ = self.async_publish(); - } - SupportedMessage::ServiceFault(response) => { - let service_result = response.response_header.service_result; - session_debug!( - self, - "Service fault received with {} error code", - service_result - ); - session_trace!(self, "ServiceFault {:?}", response); - - match service_result { - StatusCode::BadTimeout => { - debug!("Publish 
request timed out so sending another"); - let _ = self.async_publish(); - } - StatusCode::BadTooManyPublishRequests => { - // Turn off publish requests until server says otherwise - debug!("Server tells us too many publish requests so waiting for a response before resuming"); - } - StatusCode::BadSessionClosed - | StatusCode::BadSessionIdInvalid - | StatusCode::BadNoSubscription - | StatusCode::BadSubscriptionIdInvalid => { - self.on_session_closed(service_result) - } - _ => (), - } - } - _ => { - info!("{} unhandled response", self.session_id()); - } - } - } - - /// Returns the next monitored item handle - pub fn next_monitored_item_handle(&mut self) -> u32 { - self.monitored_item_handle.next() - } -} diff --git a/lib/src/client/session_retry_policy.rs b/lib/src/client/session_retry_policy.rs deleted file mode 100644 index 9997159b3..000000000 --- a/lib/src/client/session_retry_policy.rs +++ /dev/null @@ -1,209 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -use crate::types::date_time::DateTime; - -use chrono::Duration; - -#[derive(PartialEq, Debug)] -pub enum Answer { - /// Retry immediately - Retry, - /// Wait for this many milliseconds - WaitFor(u32), - /// Give up reconnecting - GiveUp, -} - -/// The session retry policy determines what to if the connection fails. In these circumstances, -/// the client needs to re-establish a connection and the policy says how many times to try between -/// failure and at what interval. -/// -/// The retry policy may choose a `retry_limit` of `None` for infinite retries. It may define -/// a `retry_interval` for the period of time in MS between each retry. Note that the policy retains -/// its own minimum retry interval and will not retry any faster than that. -/// -/// Once a connection succeeds, the retry limit is reset. -#[derive(Debug, PartialEq, Clone)] -pub struct SessionRetryPolicy { - /// The session timeout period in milliseconds. 
Used by client to run a keep-alive operation. Initially this - /// will contain your desired timeout period, but it will be adjusted when the session is created. - session_timeout: f64, - /// The maximum number of times to retry between failures before giving up. A value of 0 means - /// no retries, i.e. give up on first fail, None means no limit, i.e. infinity - retry_limit: Option, - /// Interval between retries in milliseconds - retry_interval: u32, - /// The number of failed attempts so far since the last connection. When the connection succeeds - /// this value is reset. - retry_count: u32, - /// The last retry attempt timestamp. - last_attempt: DateTime, -} - -impl Default for SessionRetryPolicy { - fn default() -> Self { - Self::new( - Self::DEFAULT_SESSION_TIMEOUT_MS, - Self::DEFAULT_RETRY_LIMIT, - Self::DEFAULT_RETRY_INTERVAL_MS, - ) - } -} - -impl SessionRetryPolicy { - /// The default retry policy will attempt to reconnect up to this many times. - pub const DEFAULT_RETRY_LIMIT: u32 = 10; - /// The default retry policy will wait this duration between reconnect attempts. 
- pub const DEFAULT_RETRY_INTERVAL_MS: u32 = 10000; - /// The minimum retry interval - pub const MIN_RETRY_INTERVAL_MS: u32 = 500; - /// The default session timeout interval in millis - pub const DEFAULT_SESSION_TIMEOUT_MS: f64 = std::f64::MAX; - - /// Create a `SessionRetryPolicy` with a limit and interval - pub fn new(session_timeout: f64, retry_limit: u32, retry_interval: u32) -> Self { - let session_timeout = if session_timeout == 0.0 { - Self::DEFAULT_SESSION_TIMEOUT_MS - } else { - session_timeout - }; - let retry_interval = if retry_interval < Self::MIN_RETRY_INTERVAL_MS { - Self::MIN_RETRY_INTERVAL_MS - } else { - retry_interval - }; - SessionRetryPolicy { - session_timeout, - retry_count: 0, - last_attempt: Self::last_attempt_default(), - retry_limit: Some(retry_limit), - retry_interval, - } - } - - /// Create a `SessionRetryPolicy` that tries forever at the specified interval - pub fn infinity(session_timeout: f64, retry_interval: u32) -> Self { - let session_timeout = if session_timeout == 0.0 { - Self::DEFAULT_SESSION_TIMEOUT_MS - } else { - session_timeout - }; - let retry_interval = if retry_interval < Self::MIN_RETRY_INTERVAL_MS { - Self::MIN_RETRY_INTERVAL_MS - } else { - retry_interval - }; - SessionRetryPolicy { - session_timeout, - retry_count: 0, - last_attempt: Self::last_attempt_default(), - retry_limit: None, - retry_interval, - } - } - - /// Create a `SessionRetryPolicy` that never tries again. 
- pub fn never(session_timeout: f64) -> Self { - Self::new(session_timeout, 0, 0) - } - - fn last_attempt_default() -> DateTime { - DateTime::ymd(1900, 1, 1) - } - - pub fn session_timeout(&self) -> f64 { - self.session_timeout - } - - pub fn retry_count(&self) -> u32 { - self.retry_count - } - - pub fn increment_retry_count(&mut self) { - self.retry_count += 1; - } - - pub fn reset_retry_count(&mut self) { - self.retry_count = 0; - } - - pub fn set_last_attempt(&mut self, last_attempt: DateTime) { - self.last_attempt = last_attempt; - } - - /// Asks the policy, given the last retry attempt, should we try to connect again, wait a period of time - /// or give up entirely. - pub fn should_retry_connect(&self, now: DateTime) -> Answer { - if let Some(retry_limit) = self.retry_limit { - if self.retry_count >= retry_limit { - // Number of retries have been exceeded - return Answer::GiveUp; - } - } - - if self.retry_interval < Self::MIN_RETRY_INTERVAL_MS { - // The constructors don't allow for this - panic!("Retry interval is less than the minimum permitted."); - } - - // Look at how much time has elapsed since the last attempt - let elapsed = now - self.last_attempt; - let retry_interval = Duration::milliseconds(self.retry_interval as i64); - if retry_interval > elapsed { - // Wait a bit - Answer::WaitFor((retry_interval - elapsed).num_milliseconds() as u32) - } else { - info!("Retry retriggered by policy"); - Answer::Retry - } - } -} - -#[test] -fn session_retry() { - let mut session_retry = SessionRetryPolicy::default(); - - let now = DateTime::now(); - - let retry_interval = - Duration::milliseconds(SessionRetryPolicy::DEFAULT_RETRY_INTERVAL_MS as i64); - let last_attempt_expired = now - retry_interval - Duration::nanoseconds(1); - let last_attempt_wait = now - retry_interval + Duration::seconds(1); - - assert_eq!( - session_retry.session_timeout(), - SessionRetryPolicy::DEFAULT_SESSION_TIMEOUT_MS - ); - - session_retry.set_last_attempt(last_attempt_expired); - 
assert_eq!(session_retry.should_retry_connect(now), Answer::Retry); - session_retry.retry_count = SessionRetryPolicy::DEFAULT_RETRY_LIMIT - 1; - assert_eq!(session_retry.should_retry_connect(now), Answer::Retry); - session_retry.retry_count = SessionRetryPolicy::DEFAULT_RETRY_LIMIT; - assert_eq!(session_retry.should_retry_connect(now), Answer::GiveUp); - - session_retry.set_last_attempt(last_attempt_wait); - session_retry.retry_count = 0; - assert_eq!( - session_retry.should_retry_connect(now), - Answer::WaitFor(1000) - ); -} - -#[test] -fn session_retry_infinity() { - let session_retry = SessionRetryPolicy::infinity(444.444, 1000); - let now = DateTime::now(); - assert_eq!(session_retry.should_retry_connect(now), Answer::Retry); - assert_eq!(session_retry.session_timeout(), 444.444); -} - -#[test] -fn session_retry_never() { - let session_retry = SessionRetryPolicy::never(987.123); - let now = DateTime::now(); - assert_eq!(session_retry.should_retry_connect(now), Answer::GiveUp); - assert_eq!(session_retry.session_timeout(), 987.123); -} diff --git a/lib/src/client/subscription.rs b/lib/src/client/subscription.rs deleted file mode 100644 index 16a2c3c2d..000000000 --- a/lib/src/client/subscription.rs +++ /dev/null @@ -1,389 +0,0 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2024 Adam Lock - -//! Provides subscription and monitored item tracking. -//! -//! The structs and functions in this file allow the client to maintain a shadow copy of the -//! subscription and monitored item state on the server. If the server goes down and the session -//! needs to be recreated, the client API can reconstruct the subscriptions and monitored item from -//! its shadow version. -//! -//! None of this is for public consumption. The client is expected to recreate state automatically -//! on a reconnect if necessary. 
- -use std::{ - collections::{BTreeSet, HashMap, HashSet}, - marker::Sync, - sync::Arc, -}; - -use crate::sync::*; -use crate::types::{ - service_types::{DataChangeNotification, ReadValueId}, - *, -}; - -use super::callbacks::OnSubscriptionNotification; - -pub(crate) struct CreateMonitoredItem { - pub id: u32, - pub client_handle: u32, - pub item_to_monitor: ReadValueId, - pub monitoring_mode: MonitoringMode, - pub queue_size: u32, - pub discard_oldest: bool, - pub sampling_interval: f64, -} - -pub(crate) struct ModifyMonitoredItem { - pub id: u32, - pub sampling_interval: f64, - pub queue_size: u32, -} - -#[derive(Debug)] -pub struct MonitoredItem { - /// This is the monitored item's id within the subscription - id: u32, - /// Monitored item's handle. Used internally - not modifiable - client_handle: u32, - // The thing that is actually being monitored - the node id, attribute, index, encoding. - item_to_monitor: ReadValueId, - /// Queue size - queue_size: usize, - /// Discard oldest - discard_oldest: bool, - /// Monitoring mode - monitoring_mode: MonitoringMode, - /// Sampling interval - sampling_interval: f64, - /// Last value of the item - last_value: DataValue, - /// A list of all values received in the last data change notification. This list is cleared immediately - /// after the data change notification. 
- values: Vec, - /// Triggered items - triggered_items: BTreeSet, -} - -impl MonitoredItem { - pub fn new(client_handle: u32) -> MonitoredItem { - MonitoredItem { - id: 0, - queue_size: 1, - sampling_interval: 0.0, - item_to_monitor: ReadValueId { - node_id: NodeId::null(), - attribute_id: 0, - index_range: UAString::null(), - data_encoding: QualifiedName::null(), - }, - monitoring_mode: MonitoringMode::Reporting, - discard_oldest: false, - last_value: DataValue::null(), - values: Vec::with_capacity(1), - client_handle, - triggered_items: BTreeSet::new(), - } - } - - pub fn id(&self) -> u32 { - self.id - } - - pub fn client_handle(&self) -> u32 { - self.client_handle - } - - pub fn item_to_monitor(&self) -> &ReadValueId { - &self.item_to_monitor - } - - pub fn sampling_interval(&self) -> f64 { - self.sampling_interval - } - - pub fn queue_size(&self) -> usize { - self.queue_size - } - - pub fn last_value(&self) -> &DataValue { - &self.last_value - } - - pub fn values(&self) -> &Vec { - &self.values - } - - pub fn clear_values(&mut self) { - self.values.clear(); - } - - pub fn append_new_value(&mut self, value: DataValue) { - if self.values.len() == self.queue_size { - let _ = self.values.pop(); - self.values.push(value); - } - } - - pub fn monitoring_mode(&self) -> MonitoringMode { - self.monitoring_mode - } - - pub fn discard_oldest(&self) -> bool { - self.discard_oldest - } - - pub(crate) fn set_id(&mut self, value: u32) { - self.id = value; - } - - pub(crate) fn set_item_to_monitor(&mut self, item_to_monitor: ReadValueId) { - self.item_to_monitor = item_to_monitor; - } - - pub(crate) fn set_sampling_interval(&mut self, value: f64) { - self.sampling_interval = value; - } - - pub(crate) fn set_queue_size(&mut self, value: usize) { - self.queue_size = value; - if self.queue_size > self.values.capacity() { - self.values - .reserve(self.queue_size - self.values.capacity()); - } - } - - pub(crate) fn set_monitoring_mode(&mut self, monitoring_mode: MonitoringMode) { - 
self.monitoring_mode = monitoring_mode; - } - - pub(crate) fn set_discard_oldest(&mut self, discard_oldest: bool) { - self.discard_oldest = discard_oldest; - } - - pub(crate) fn set_triggering(&mut self, links_to_add: &[u32], links_to_remove: &[u32]) { - links_to_remove.iter().for_each(|i| { - self.triggered_items.remove(i); - }); - links_to_add.iter().for_each(|i| { - self.triggered_items.insert(*i); - }); - } - - pub(crate) fn triggered_items(&self) -> &BTreeSet { - &self.triggered_items - } -} - -pub struct Subscription { - /// Subscription id, supplied by server - subscription_id: u32, - /// Publishing interval in seconds - publishing_interval: f64, - /// Lifetime count, revised by server - lifetime_count: u32, - /// Max keep alive count, revised by server - max_keep_alive_count: u32, - /// Max notifications per publish, revised by server - max_notifications_per_publish: u32, - /// Publishing enabled - publishing_enabled: bool, - /// Priority - priority: u8, - /// The change callback will be what is called if any monitored item changes within a cycle. - /// The monitored item is referenced by its id - notification_callback: Arc>, - /// A map of monitored items associated with the subscription (key = monitored_item_id) - monitored_items: HashMap, - /// A map of client handle to monitored item id - client_handles: HashMap, -} - -impl Subscription { - /// Creates a new subscription using the supplied parameters and the supplied data change callback. 
- pub fn new( - subscription_id: u32, - publishing_interval: f64, - lifetime_count: u32, - max_keep_alive_count: u32, - max_notifications_per_publish: u32, - publishing_enabled: bool, - priority: u8, - notification_callback: Arc>, - ) -> Subscription { - Subscription { - subscription_id, - publishing_interval, - lifetime_count, - max_keep_alive_count, - max_notifications_per_publish, - publishing_enabled, - priority, - notification_callback, - monitored_items: HashMap::new(), - client_handles: HashMap::new(), - } - } - - pub fn monitored_items(&self) -> &HashMap { - &self.monitored_items - } - - pub fn subscription_id(&self) -> u32 { - self.subscription_id - } - - pub fn publishing_interval(&self) -> f64 { - self.publishing_interval - } - - pub fn lifetime_count(&self) -> u32 { - self.lifetime_count - } - - pub fn max_keep_alive_count(&self) -> u32 { - self.max_keep_alive_count - } - - pub fn max_notifications_per_publish(&self) -> u32 { - self.max_notifications_per_publish - } - - pub fn publishing_enabled(&self) -> bool { - self.publishing_enabled - } - - pub fn priority(&self) -> u8 { - self.priority - } - - pub fn notification_callback( - &self, - ) -> Arc> { - self.notification_callback.clone() - } - - pub(crate) fn set_publishing_interval(&mut self, publishing_interval: f64) { - self.publishing_interval = publishing_interval; - } - - pub(crate) fn set_lifetime_count(&mut self, lifetime_count: u32) { - self.lifetime_count = lifetime_count; - } - - pub(crate) fn set_max_keep_alive_count(&mut self, max_keep_alive_count: u32) { - self.max_keep_alive_count = max_keep_alive_count; - } - - pub(crate) fn set_max_notifications_per_publish(&mut self, max_notifications_per_publish: u32) { - self.max_notifications_per_publish = max_notifications_per_publish; - } - - pub(crate) fn set_priority(&mut self, priority: u8) { - self.priority = priority; - } - - pub(crate) fn set_publishing_enabled(&mut self, publishing_enabled: bool) { - self.publishing_enabled = 
publishing_enabled; - } - - pub(crate) fn insert_monitored_items(&mut self, items_to_create: &[CreateMonitoredItem]) { - items_to_create.iter().for_each(|i| { - let mut monitored_item = MonitoredItem::new(i.client_handle); - monitored_item.set_id(i.id); - monitored_item.set_monitoring_mode(i.monitoring_mode); - monitored_item.set_discard_oldest(i.discard_oldest); - monitored_item.set_sampling_interval(i.sampling_interval); - monitored_item.set_queue_size(i.queue_size as usize); - monitored_item.set_item_to_monitor(i.item_to_monitor.clone()); - - let client_handle = monitored_item.client_handle(); - let monitored_item_id = monitored_item.id(); - self.monitored_items - .insert(monitored_item_id, monitored_item); - self.client_handles.insert(client_handle, monitored_item_id); - }); - } - - pub(crate) fn modify_monitored_items(&mut self, items_to_modify: &[ModifyMonitoredItem]) { - items_to_modify.iter().for_each(|i| { - if let Some(ref mut monitored_item) = self.monitored_items.get_mut(&i.id) { - monitored_item.set_sampling_interval(i.sampling_interval); - monitored_item.set_queue_size(i.queue_size as usize); - } - }); - } - - pub(crate) fn delete_monitored_items(&mut self, items_to_delete: &[u32]) { - items_to_delete.iter().for_each(|id| { - // Remove the monitored item and the client handle / id entry - if let Some(monitored_item) = self.monitored_items.remove(id) { - let _ = self.client_handles.remove(&monitored_item.client_handle()); - } - }) - } - - pub(crate) fn set_triggering( - &mut self, - triggering_item_id: u32, - links_to_add: &[u32], - links_to_remove: &[u32], - ) { - if let Some(ref mut monitored_item) = self.monitored_items.get_mut(&triggering_item_id) { - monitored_item.set_triggering(links_to_add, links_to_remove); - } - } - - fn monitored_item_id_from_handle(&self, client_handle: u32) -> Option { - self.client_handles.get(&client_handle).copied() - } - - pub(crate) fn on_event(&mut self, events: &[EventNotificationList]) { - let mut cb = 
trace_lock!(self.notification_callback); - events.iter().for_each(|event| { - cb.on_event(event); - }); - } - - pub(crate) fn on_data_change(&mut self, data_change_notifications: &[DataChangeNotification]) { - let mut monitored_item_ids = HashSet::new(); - data_change_notifications.iter().for_each(|n| { - if let Some(ref monitored_items) = n.monitored_items { - monitored_item_ids.clear(); - for i in monitored_items { - let monitored_item_id = { - let monitored_item_id = self.monitored_item_id_from_handle(i.client_handle); - if monitored_item_id.is_none() { - continue; - } - *monitored_item_id.as_ref().unwrap() - }; - let monitored_item = self.monitored_items.get_mut(&monitored_item_id).unwrap(); - monitored_item.last_value = i.value.clone(); - monitored_item.values.push(i.value.clone()); - monitored_item_ids.insert(monitored_item_id); - } - if !monitored_item_ids.is_empty() { - let data_change_items: Vec<&MonitoredItem> = monitored_item_ids - .iter() - .map(|id| self.monitored_items.get(id).unwrap()) - .collect(); - - { - // Call the call back with the changes we collected - let mut cb = trace_lock!(self.notification_callback); - cb.on_data_change(&data_change_items); - } - - // Clear the values - monitored_item_ids.iter().for_each(|id| { - let m = self.monitored_items.get_mut(id).unwrap(); - m.clear_values(); - }); - } - } - }); - } -} diff --git a/lib/src/client/tests/mod.rs b/lib/src/client/tests/mod.rs deleted file mode 100644 index 596299810..000000000 --- a/lib/src/client/tests/mod.rs +++ /dev/null @@ -1,161 +0,0 @@ -use std::{self, collections::BTreeMap, path::PathBuf}; - -use crate::core::config::Config; -use crate::crypto::SecurityPolicy; -use crate::types::*; - -use crate::client::{ - builder::ClientBuilder, - config::{ClientConfig, ClientEndpoint, ClientUserToken, ANONYMOUS_USER_TOKEN_ID}, -}; - -fn make_test_file(filename: &str) -> PathBuf { - let mut path = std::env::temp_dir(); - path.push(filename); - path -} - -pub fn sample_builder() -> 
ClientBuilder { - ClientBuilder::new() - .application_name("OPC UA Sample Client") - .application_uri("urn:SampleClient") - .create_sample_keypair(true) - .certificate_path("own/cert.der") - .private_key_path("private/private.pem") - .trust_server_certs(true) - .pki_dir("./pki") - .endpoints(vec![ - ( - "sample_none", - ClientEndpoint { - url: String::from("opc.tcp://127.0.0.1:4855/"), - security_policy: String::from(SecurityPolicy::None.to_str()), - security_mode: String::from(MessageSecurityMode::None), - user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), - }, - ), - ( - "sample_basic128rsa15", - ClientEndpoint { - url: String::from("opc.tcp://127.0.0.1:4855/"), - security_policy: String::from(SecurityPolicy::Basic128Rsa15.to_str()), - security_mode: String::from(MessageSecurityMode::SignAndEncrypt), - user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), - }, - ), - ( - "sample_basic256", - ClientEndpoint { - url: String::from("opc.tcp://127.0.0.1:4855/"), - security_policy: String::from(SecurityPolicy::Basic256.to_str()), - security_mode: String::from(MessageSecurityMode::SignAndEncrypt), - user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), - }, - ), - ( - "sample_basic256sha256", - ClientEndpoint { - url: String::from("opc.tcp://127.0.0.1:4855/"), - security_policy: String::from(SecurityPolicy::Basic256Sha256.to_str()), - security_mode: String::from(MessageSecurityMode::SignAndEncrypt), - user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), - }, - ), - ]) - .default_endpoint("sample_none") - .user_token( - "sample_user", - ClientUserToken::user_pass("sample1", "sample1pwd"), - ) - .user_token( - "sample_user2", - ClientUserToken::user_pass("sample2", "sample2pwd"), - ) -} - -pub fn default_sample_config() -> ClientConfig { - sample_builder().config() -} - -#[test] -fn client_sample_config() { - // This test exists to create the samples/client.conf file - // This test only exists to dump a sample config - let config = default_sample_config(); - let mut path = 
std::env::current_dir().unwrap(); - path.push(".."); - path.push("samples"); - path.push("client.conf"); - println!("Path is {:?}", path); - - let saved = config.save(&path); - println!("Saved = {:?}", saved); - assert!(saved.is_ok()); - assert!(config.is_valid()); -} - -#[test] -fn client_config() { - let path = make_test_file("client_config.yaml"); - println!("Client path = {:?}", path); - let config = default_sample_config(); - let saved = config.save(&path); - println!("Saved = {:?}", saved); - assert!(config.save(&path).is_ok()); - if let Ok(config2) = ClientConfig::load(&path) { - assert_eq!(config, config2); - } else { - panic!("Cannot load config from file"); - } -} - -#[test] -fn client_invalid_security_policy_config() { - let mut config = default_sample_config(); - // Security policy is wrong - config.endpoints = BTreeMap::new(); - config.endpoints.insert( - String::from("sample_none"), - ClientEndpoint { - url: String::from("opc.tcp://127.0.0.1:4855"), - security_policy: String::from("http://blah"), - security_mode: String::from(MessageSecurityMode::None), - user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), - }, - ); - assert!(!config.is_valid()); -} - -#[test] -fn client_invalid_security_mode_config() { - let mut config = default_sample_config(); - // Message security mode is wrong - config.endpoints = BTreeMap::new(); - config.endpoints.insert( - String::from("sample_none"), - ClientEndpoint { - url: String::from("opc.tcp://127.0.0.1:4855"), - security_policy: String::from(SecurityPolicy::Basic128Rsa15.to_uri()), - security_mode: String::from("SingAndEncrypt"), - user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), - }, - ); - assert!(!config.is_valid()); -} - -#[test] -fn client_anonymous_user_tokens_id() { - let mut config = default_sample_config(); - // id anonymous is reserved - config.user_tokens = BTreeMap::new(); - config.user_tokens.insert( - String::from("ANONYMOUS"), - ClientUserToken { - user: String::new(), - password: Some(String::new()), 
- cert_path: None, - private_key_path: None, - }, - ); - assert!(!config.is_valid()); -} diff --git a/lib/src/client/transport/buffer.rs b/lib/src/client/transport/buffer.rs new file mode 100644 index 000000000..5c501a242 --- /dev/null +++ b/lib/src/client/transport/buffer.rs @@ -0,0 +1,368 @@ +use std::{ + collections::VecDeque, + io::{BufRead, Cursor}, +}; + +use tokio::io::AsyncWriteExt; + +use crate::{ + core::{ + comms::{chunker::Chunker, message_chunk::MessageChunk, secure_channel::SecureChannel}, + supported_message::SupportedMessage, + }, + types::StatusCode, +}; + +#[derive(Copy, Clone, Debug)] +enum SendBufferState { + Reading(usize), + Writing, +} + +pub struct SendBuffer { + /// The send buffer + buffer: Cursor>, + /// Queued chunks + chunks: VecDeque, + /// The last request id + last_request_id: u32, + /// Last sent sequence number + last_sent_sequence_number: u32, + /// Maximum size of a message, total. Use 0 for no limit + max_message_size: usize, + /// Maximum number of chunks in a message. + max_chunk_count: usize, + /// Maximum size of each individual chunk. + send_buffer_size: usize, + + state: SendBufferState, +} + +// The send buffer works as follows: +// - `write` is called with a message that is written to the internal buffer. +// - `read_into_async` is called, which sets the state to `Writing`. +// - Once the buffer is exhausted, the state is set back to `Reading`. +// - `write` cannot be called while we are writing to the output. 
+impl SendBuffer { + pub fn new(buffer_size: usize, max_message_size: usize, max_chunk_count: usize) -> Self { + Self { + buffer: Cursor::new(vec![0u8; buffer_size + 1024]), + chunks: VecDeque::with_capacity(max_chunk_count), + last_request_id: 1000, + last_sent_sequence_number: 0, + max_message_size, + max_chunk_count, + send_buffer_size: buffer_size, + state: SendBufferState::Writing, + } + } + + pub fn encode_next_chunk(&mut self, secure_channel: &SecureChannel) -> Result<(), StatusCode> { + if matches!(self.state, SendBufferState::Reading(_)) { + return Err(StatusCode::BadInvalidState); + } + + let Some(next_chunk) = self.chunks.pop_front() else { + return Ok(()); + }; + + trace!("Sending chunk {:?}", next_chunk); + let size = secure_channel.apply_security(&next_chunk, self.buffer.get_mut())?; + self.state = SendBufferState::Reading(size); + + Ok(()) + } + + pub fn write( + &mut self, + request_id: u32, + message: SupportedMessage, + secure_channel: &SecureChannel, + ) -> Result { + trace!("Writing request to buffer"); + + // We're not allowed to write when in reading state, we need to empty the buffer first + if matches!(self.state, SendBufferState::Reading(_)) { + return Err(StatusCode::BadInvalidState); + } + + // Turn message to chunk(s) + let chunks = Chunker::encode( + self.last_sent_sequence_number + 1, + request_id, + self.max_message_size, + self.send_buffer_size, + secure_channel, + &message, + )?; + + if self.max_chunk_count > 0 && chunks.len() > self.max_chunk_count { + error!( + "Cannot write message since {} chunks exceeds {} chunk limit", + chunks.len(), + self.max_chunk_count + ); + Err(StatusCode::BadCommunicationError) + } else { + // Sequence number monotonically increases per chunk + self.last_sent_sequence_number += chunks.len() as u32; + + // Send chunks + self.chunks.extend(chunks.into_iter()); + Ok(request_id) + } + } + + pub fn next_request_id(&mut self) -> u32 { + self.last_request_id += 1; + self.last_request_id + } + + pub async fn 
read_into_async( + &mut self, + write: &mut (impl tokio::io::AsyncWrite + Unpin), + ) -> Result<(), tokio::io::Error> { + // Set the state to writing, or get the current end point + let end = match self.state { + SendBufferState::Writing => { + let end = self.buffer.position() as usize; + self.state = SendBufferState::Reading(end); + self.buffer.set_position(0); + end + } + SendBufferState::Reading(end) => end, + }; + + let pos = self.buffer.position() as usize; + let buf = &self.buffer.get_ref()[pos..end]; + // Write to the stream, note that we do not actually advance the stream before + // after we have written. This means that since `write` is cancellation safe, our stream is + // cancellation safe, which is essential. + let written = write.write(buf).await?; + + self.buffer.consume(written); + + if end == self.buffer.position() as usize { + self.state = SendBufferState::Writing; + self.buffer.set_position(0); + } + + Ok(()) + } + + pub fn should_encode_chunks(&self) -> bool { + !self.chunks.is_empty() && !self.can_read() + } + + pub fn can_read(&self) -> bool { + matches!(self.state, SendBufferState::Reading(_)) || self.buffer.position() != 0 + } +} + +#[cfg(test)] +mod tests { + use std::io::Cursor; + use std::sync::Arc; + + use parking_lot::RwLock; + + use super::SendBuffer; + + use crate::core::comms::secure_channel::{Role, SecureChannel}; + use crate::crypto::CertificateStore; + use crate::server::prelude::StatusCode; + use crate::types::{ + DateTime, DecodingOptions, NodeId, ReadRequest, ReadValueId, RequestHeader, + TimestampsToReturn, + }; + + fn get_buffer_and_channel() -> (SendBuffer, SecureChannel) { + let buffer = SendBuffer::new(8196, 81960, 5); + let channel = SecureChannel::new( + Arc::new(RwLock::new(CertificateStore::new(std::path::Path::new( + "./pki", + )))), + Role::Client, + DecodingOptions::test(), + ); + + (buffer, channel) + } + + #[tokio::test] + async fn test_buffer_simple() { + crate::console_logging::init(); + // Write a small message 
to the buffer + let message = ReadRequest { + request_header: RequestHeader::new(&NodeId::null(), &DateTime::null(), 101), + max_age: 0.0, + timestamps_to_return: TimestampsToReturn::Both, + nodes_to_read: Some(vec![ReadValueId { + node_id: (1, 1).into(), + attribute_id: 1, + ..Default::default() + }]), + }; + + let (mut buffer, channel) = get_buffer_and_channel(); + + let request_id = buffer.write(1, message.into(), &channel).unwrap(); + assert_eq!(request_id, 1); + + assert!(buffer.should_encode_chunks()); + assert_eq!(buffer.chunks.len(), 1); + buffer.encode_next_chunk(&channel).unwrap(); + assert!(buffer.can_read()); + + let mut cursor = Cursor::new(Vec::new()); + buffer.read_into_async(&mut cursor).await.unwrap(); + assert!(cursor.get_ref().len() > 50); + } + + #[tokio::test] + async fn test_buffer_chunking() { + crate::console_logging::init(); + // Write a large enough message that it is split into chunks. + let message = ReadRequest { + request_header: RequestHeader::new(&NodeId::null(), &DateTime::null(), 101), + max_age: 0.0, + timestamps_to_return: TimestampsToReturn::Both, + nodes_to_read: Some( + (0..1000) + .map(|r| ReadValueId { + node_id: (1, r).into(), + attribute_id: 1, + ..Default::default() + }) + .collect(), + ), + }; + + let (mut buffer, channel) = get_buffer_and_channel(); + + let request_id = buffer.write(1, message.into(), &channel).unwrap(); + assert_eq!(request_id, 1); + + assert_eq!(buffer.chunks.len(), 3); + let mut cursor = Cursor::new(Vec::new()); + + for _ in 0..3 { + assert!(buffer.should_encode_chunks()); + buffer.encode_next_chunk(&channel).unwrap(); + assert!(!buffer.should_encode_chunks()); + assert!(buffer.can_read()); + + buffer.read_into_async(&mut cursor).await.unwrap(); + } + assert!(!buffer.should_encode_chunks()); + assert!(!buffer.can_read()); + assert!(cursor.get_ref().len() > 8196 * 2 && cursor.get_ref().len() < 8196 * 3); + } + + #[test] + fn test_buffer_too_large_message() { + crate::console_logging::init(); + // 
Write a very large message exceeding the max message size. + let message = ReadRequest { + request_header: RequestHeader::new(&NodeId::null(), &DateTime::null(), 101), + max_age: 0.0, + timestamps_to_return: TimestampsToReturn::Both, + nodes_to_read: Some( + (0..10000) + .map(|r| ReadValueId { + node_id: (1, r).into(), + attribute_id: 1, + ..Default::default() + }) + .collect(), + ), + }; + + let (mut buffer, channel) = get_buffer_and_channel(); + + let err = buffer.write(1, message.into(), &channel).unwrap_err(); + assert_eq!(err, StatusCode::BadRequestTooLarge); + } + + #[test] + fn test_buffer_too_many_chunks() { + crate::console_logging::init(); + // Write a large enough message that we exceed the maximum chunk count. + let message = ReadRequest { + request_header: RequestHeader::new(&NodeId::null(), &DateTime::null(), 101), + max_age: 0.0, + timestamps_to_return: TimestampsToReturn::Both, + nodes_to_read: Some( + (0..4000) + .map(|r| ReadValueId { + node_id: (1, r).into(), + attribute_id: 1, + ..Default::default() + }) + .collect(), + ), + }; + + let (mut buffer, channel) = get_buffer_and_channel(); + + let err = buffer.write(1, message.into(), &channel).unwrap_err(); + assert_eq!(err, StatusCode::BadCommunicationError); + } + + #[tokio::test] + async fn test_buffer_read_partial() { + crate::console_logging::init(); + // Write a large message to the buffer. + let message = ReadRequest { + request_header: RequestHeader::new(&NodeId::null(), &DateTime::null(), 101), + max_age: 0.0, + timestamps_to_return: TimestampsToReturn::Both, + nodes_to_read: Some( + (0..1000) + .map(|r| ReadValueId { + node_id: (1, r).into(), + attribute_id: 1, + ..Default::default() + }) + .collect(), + ), + }; + + let (mut buffer, channel) = get_buffer_and_channel(); + + let request_id = buffer.write(1, message.into(), &channel).unwrap(); + assert_eq!(request_id, 1); + + assert_eq!(buffer.chunks.len(), 3); + // Use a fixed size buffer exactly half the chunk size. 
This simulates a TCP connection + // writing data in smaller chunks than configured chunk size. + let mut buf = [0u8; 4098]; + // Cursor<&mut [u8; N]> doesn't support AsyncWrite, but Cursor<&mut [u8]> does. + let mut cursor = Cursor::new(&mut buf as &mut [u8]); + + for _ in 0..2 { + println!("Encode chunks"); + assert!(buffer.should_encode_chunks()); + buffer.encode_next_chunk(&channel).unwrap(); + assert!(!buffer.should_encode_chunks()); + assert!(buffer.can_read()); + + buffer.read_into_async(&mut cursor).await.unwrap(); + assert!(buffer.can_read()); + assert_eq!(cursor.position(), 4098); + cursor.set_position(0); + buffer.read_into_async(&mut cursor).await.unwrap(); + assert!(!buffer.can_read()); + assert_eq!(cursor.position(), 4098); + cursor.set_position(0); + } + assert!(buffer.should_encode_chunks()); + buffer.encode_next_chunk(&channel).unwrap(); + assert!(buffer.can_read()); + buffer.read_into_async(&mut cursor).await.unwrap(); + assert!(cursor.position() < 4098); + + assert!(!buffer.should_encode_chunks()); + assert!(!buffer.can_read()); + } +} diff --git a/lib/src/client/transport/channel.rs b/lib/src/client/transport/channel.rs new file mode 100644 index 000000000..7273cc9dc --- /dev/null +++ b/lib/src/client/transport/channel.rs @@ -0,0 +1,269 @@ +use std::{str::FromStr, sync::Arc, time::Duration}; + +use crate::{ + client::{session::SessionInfo, transport::core::TransportPollResult}, + core::{ + comms::secure_channel::{Role, SecureChannel}, + supported_message::SupportedMessage, + }, + crypto::{CertificateStore, SecurityPolicy}, + sync::RwLock, + types::{ + ByteString, CloseSecureChannelRequest, DecodingOptions, NodeId, RequestHeader, + SecurityTokenRequestType, StatusCode, + }, +}; +use arc_swap::{ArcSwap, ArcSwapOption}; + +use super::state::{Request, RequestSend, SecureChannelState}; + +use crate::client::{ + retry::SessionRetryPolicy, + transport::{ + tcp::{TcpTransport, TransportConfiguration}, + OutgoingMessage, + }, +}; + +/// Wrapper around an 
open secure channel +pub struct AsyncSecureChannel { + session_info: SessionInfo, + session_retry_policy: SessionRetryPolicy, + pub(crate) secure_channel: Arc>, + certificate_store: Arc>, + transport_config: TransportConfiguration, + state: SecureChannelState, + issue_channel_lock: tokio::sync::Mutex<()>, + + request_send: ArcSwapOption, +} + +pub struct SecureChannelEventLoop { + transport: TcpTransport, +} + +impl SecureChannelEventLoop { + pub async fn poll(&mut self) -> TransportPollResult { + self.transport.poll().await + } +} + +impl AsyncSecureChannel { + pub fn new( + certificate_store: Arc>, + session_info: SessionInfo, + session_retry_policy: SessionRetryPolicy, + decoding_options: DecodingOptions, + ignore_clock_skew: bool, + auth_token: Arc>, + transport_config: TransportConfiguration, + ) -> Self { + let secure_channel = Arc::new(RwLock::new(SecureChannel::new( + certificate_store.clone(), + Role::Client, + decoding_options, + ))); + + Self { + transport_config, + issue_channel_lock: tokio::sync::Mutex::new(()), + state: SecureChannelState::new(ignore_clock_skew, secure_channel.clone(), auth_token), + session_info, + secure_channel, + certificate_store, + session_retry_policy, + request_send: Default::default(), + } + } + + pub async fn send( + &self, + request: impl Into, + timeout: Duration, + ) -> Result { + let sender = self.request_send.load().as_deref().cloned(); + let Some(send) = sender else { + return Err(StatusCode::BadNotConnected); + }; + + let should_renew_security_token = { + let secure_channel = trace_read_lock!(self.secure_channel); + secure_channel.should_renew_security_token() + }; + + if should_renew_security_token { + // Grab the lock, then check again whether we should renew the secure channel, + // this avoids renewing it multiple times if the client sends many requests in quick + // succession. + // Also, if the channel is currently being renewed, we need to wait for the new security token. 
+ let guard = self.issue_channel_lock.lock().await; + let should_renew_security_token = { + let secure_channel = trace_read_lock!(self.secure_channel); + secure_channel.should_renew_security_token() + }; + + if should_renew_security_token { + let request = self.state.begin_issue_or_renew_secure_channel( + SecurityTokenRequestType::Renew, + Duration::from_secs(30), + send.clone(), + ); + + let resp = request.send().await?; + + self.state.end_issue_or_renew_secure_channel(resp)?; + } + + drop(guard); + } + + Request::new(request, send, timeout).send().await + } + + pub async fn connect(&self) -> Result { + self.request_send.store(None); + loop { + let mut backoff = self.session_retry_policy.new_backoff(); + match self.connect_no_retry().await { + Ok(event_loop) => { + break Ok(event_loop); + } + Err(s) => { + let Some(delay) = backoff.next() else { + break Err(s); + }; + + tokio::time::sleep(delay).await + } + } + } + } + + pub(crate) fn make_request_header(&self, timeout: Duration) -> RequestHeader { + self.state.make_request_header(timeout) + } + + pub(crate) fn client_nonce(&self) -> ByteString { + let secure_channel = trace_read_lock!(self.secure_channel); + secure_channel.local_nonce_as_byte_string() + } + + pub(crate) fn update_from_created_session( + &self, + nonce: &ByteString, + certificate: &ByteString, + ) -> Result<(), StatusCode> { + let mut secure_channel = trace_write_lock!(self.secure_channel); + secure_channel.set_remote_nonce_from_byte_string(nonce)?; + secure_channel.set_remote_cert_from_byte_string(certificate)?; + Ok(()) + } + + pub(crate) fn security_policy(&self) -> SecurityPolicy { + let secure_channel = trace_read_lock!(self.secure_channel); + secure_channel.security_policy() + } + + pub async fn connect_no_retry(&self) -> Result { + { + let mut secure_channel = trace_write_lock!(self.secure_channel); + secure_channel.clear_security_token(); + } + + let (mut transport, send) = self.create_transport().await?; + + let request = 
self.state.begin_issue_or_renew_secure_channel( + SecurityTokenRequestType::Issue, + Duration::from_secs(30), + send.clone(), + ); + + let request_fut = request.send(); + tokio::pin!(request_fut); + + // Temporarily poll the transport task while we're waiting for a response. + let resp = loop { + tokio::select! { + r = &mut request_fut => break r?, + r = transport.poll() => { + if let TransportPollResult::Closed(e) = r { + return Err(e); + } + } + } + }; + + self.request_send.store(Some(Arc::new(send))); + self.state.end_issue_or_renew_secure_channel(resp)?; + + Ok(SecureChannelEventLoop { transport }) + } + + async fn create_transport( + &self, + ) -> Result<(TcpTransport, tokio::sync::mpsc::Sender), StatusCode> { + let endpoint_url = self.session_info.endpoint.endpoint_url.clone(); + info!("Connect"); + let security_policy = + SecurityPolicy::from_str(self.session_info.endpoint.security_policy_uri.as_ref()) + .unwrap(); + + if security_policy == SecurityPolicy::Unknown { + error!( + "connect, security policy \"{}\" is unknown", + self.session_info.endpoint.security_policy_uri.as_ref() + ); + return Err(StatusCode::BadSecurityPolicyRejected); + } else { + let (cert, key) = { + let certificate_store = trace_write_lock!(self.certificate_store); + certificate_store.read_own_cert_and_pkey_optional() + }; + + { + let mut secure_channel = trace_write_lock!(self.secure_channel); + secure_channel.set_private_key(key); + secure_channel.set_cert(cert); + secure_channel.set_security_policy(security_policy); + secure_channel.set_security_mode(self.session_info.endpoint.security_mode); + let _ = secure_channel.set_remote_cert_from_byte_string( + &self.session_info.endpoint.server_certificate, + ); + info!("Security policy = {:?}", security_policy); + info!( + "Security mode = {:?}", + self.session_info.endpoint.security_mode + ); + } + + let (send, recv) = tokio::sync::mpsc::channel(self.transport_config.max_inflight); + let transport = TcpTransport::connect( + 
self.secure_channel.clone(), + recv, + self.transport_config.clone(), + endpoint_url.as_ref(), + ) + .await?; + + Ok((transport, send)) + } + } + + /// Close the secure channel, optionally wait for the channel to close. + pub async fn close_channel(&self) { + let msg = CloseSecureChannelRequest { + request_header: self.state.make_request_header(Duration::from_secs(60)), + }; + + let sender = self.request_send.load().as_deref().cloned(); + let request = sender.map(|s| Request::new(msg, s, Duration::from_secs(60))); + + // Instruct the channel to not attempt to reopen. + if let Some(request) = request { + if let Err(e) = request.send_no_response().await { + error!("Failed to send disconnect message, queue full: {e}"); + return; + } + } + } +} diff --git a/lib/src/client/transport/core.rs b/lib/src/client/transport/core.rs new file mode 100644 index 000000000..0cc4a6fcc --- /dev/null +++ b/lib/src/client/transport/core.rs @@ -0,0 +1,308 @@ +use std::collections::HashMap; +use std::sync::Arc; +use std::time::Instant; + +use futures::future::Either; +use parking_lot::RwLock; + +use crate::core::comms::message_chunk::MessageIsFinalType; +use crate::core::comms::{ + chunker::Chunker, message_chunk::MessageChunk, message_chunk_info::ChunkInfo, + secure_channel::SecureChannel, tcp_codec::Message, +}; +use crate::core::supported_message::SupportedMessage; +use crate::types::StatusCode; + +use super::buffer::SendBuffer; + +#[derive(Debug)] +struct MessageChunkWithChunkInfo { + header: ChunkInfo, + data_with_header: Vec, +} + +pub(crate) struct MessageState { + callback: tokio::sync::oneshot::Sender>, + chunks: Vec, + deadline: Instant, +} + +pub(super) struct TransportState { + /// Channel for outgoing requests. Will only be polled if the number of inflight requests is below the limit. + outgoing_recv: tokio::sync::mpsc::Receiver, + /// State of pending requests + message_states: HashMap, + /// Maximum number of inflight requests, or None if unlimited. 
+ max_inflight: usize, + /// Secure channel + pub(super) secure_channel: Arc>, + /// Max pending incoming messages + max_pending_incoming: usize, + /// Last decoded sequence number + last_received_sequence_number: u32, +} + +#[derive(Debug)] +pub enum TransportPollResult { + OutgoingMessage, + OutgoingMessageSent, + IncomingMessage, + Closed(StatusCode), +} + +pub(crate) struct OutgoingMessage { + pub request: SupportedMessage, + pub callback: Option>>, + pub deadline: Instant, +} + +impl TransportState { + pub fn new( + secure_channel: Arc>, + outgoing_recv: tokio::sync::mpsc::Receiver, + max_pending_incoming: usize, + max_inflight: usize, + ) -> Self { + Self { + secure_channel, + outgoing_recv, + message_states: HashMap::new(), + max_inflight, + max_pending_incoming, + last_received_sequence_number: 0, + } + } + + /// Wait for an outgoing message. Will also check for timed out messages. + pub async fn wait_for_outgoing_message( + &mut self, + send_buffer: &mut SendBuffer, + ) -> Option<(SupportedMessage, u32)> { + loop { + // Check for any messages that have timed out, and get the time until the next message + // times out + let timeout_fut = match self.next_timeout() { + Some(t) => Either::Left(tokio::time::sleep_until(t.into())), + None => Either::Right(futures::future::pending::<()>()), + }; + + // Only listen for outgoing messages if the number of inflight messages is below the limit. + if self.max_inflight > self.message_states.len() { + tokio::select! 
{ + _ = timeout_fut => { + continue; + } + outgoing = self.outgoing_recv.recv() => { + let Some(outgoing) = outgoing else { + return None; + }; + let request_id = send_buffer.next_request_id(); + if let Some(callback) = outgoing.callback { + self.message_states.insert(request_id, MessageState { + callback, + chunks: Vec::new(), + deadline: outgoing.deadline, + }); + } + break Some((outgoing.request, request_id)); + } + } + } else { + timeout_fut.await; + } + } + } + + /// Store incoming messages in the message state. + pub fn handle_incoming_message(&mut self, message: Message) -> Result<(), StatusCode> { + let status = match message { + Message::Acknowledge(ack) => { + debug!("Reader got an unexpected ack {:?}", ack); + StatusCode::BadUnexpectedError + } + Message::Chunk(chunk) => self.process_chunk(chunk).err().unwrap_or(StatusCode::Good), + Message::Error(error) => { + if let Some(status_code) = StatusCode::from_u32(error.error) { + status_code + } else { + StatusCode::BadUnexpectedError + } + } + m => { + error!("Expected a recognized message, got {:?}", m); + StatusCode::BadUnexpectedError + } + }; + + if status.is_good() { + Ok(()) + } else { + Err(status) + } + } + + fn next_timeout(&mut self) -> Option { + let now = Instant::now(); + let mut next_timeout = None; + let mut timed_out = Vec::new(); + for (id, state) in &self.message_states { + if state.deadline <= now { + timed_out.push(*id); + } else { + match &next_timeout { + Some(t) if *t > state.deadline => next_timeout = Some(state.deadline), + None => next_timeout = Some(state.deadline), + _ => {} + } + } + } + for id in timed_out { + if let Some(state) = self.message_states.remove(&id) { + debug!("Message {} timed out", id); + let _ = state.callback.send(Err(StatusCode::BadTimeout)); + } + } + next_timeout + } + + fn process_chunk(&mut self, chunk: MessageChunk) -> Result<(), StatusCode> { + let mut secure_channel = trace_write_lock!(self.secure_channel); + let chunk = 
secure_channel.verify_and_remove_security(&chunk.data)?; + + let chunk_info = chunk.chunk_info(&secure_channel)?; + drop(secure_channel); + let req_id = chunk_info.sequence_header.request_id; + + // We do not care at all about incoming messages without a + // corresponding request. + let Some(message_state) = self.message_states.get_mut(&req_id) else { + return Ok(()); + }; + + match chunk_info.message_header.is_final { + MessageIsFinalType::Intermediate => { + debug!( + "receive chunk intermediate {}:{}", + chunk_info.sequence_header.request_id, + chunk_info.sequence_header.sequence_number + ); + message_state.chunks.push(MessageChunkWithChunkInfo { + header: chunk_info, + data_with_header: chunk.data, + }); + let chunks_len: usize = message_state.chunks.len(); + if self.max_pending_incoming > 0 && chunks_len > self.max_pending_incoming { + error!( + "too many pending incoming messages {} > {}", + chunks_len, self.max_pending_incoming + ); + let message_state = self.message_states.remove(&req_id).unwrap(); + let _ = message_state + .callback + .send(Err(StatusCode::BadEncodingLimitsExceeded)); + } + } + MessageIsFinalType::FinalError => { + info!("Discarding chunk marked in as final error"); + let message_state = self.message_states.remove(&req_id).unwrap(); + let _ = message_state + .callback + .send(Err(StatusCode::BadCommunicationError)); + } + MessageIsFinalType::Final => { + message_state.chunks.push(MessageChunkWithChunkInfo { + header: chunk_info, + data_with_header: chunk.data, + }); + let message_state = self.message_states.remove(&req_id).unwrap(); + let in_chunks = Self::merge_chunks(message_state.chunks)?; + let message = self.turn_received_chunks_into_message(&in_chunks)?; + + let _ = message_state.callback.send(Ok(message)); + } + } + Ok(()) + } + + fn turn_received_chunks_into_message( + &mut self, + chunks: &[MessageChunk], + ) -> Result { + // Validate that all chunks have incrementing sequence numbers and valid chunk types + let secure_channel = 
trace_read_lock!(self.secure_channel); + self.last_received_sequence_number = Chunker::validate_chunks( + self.last_received_sequence_number + 1, + &secure_channel, + chunks, + )?; + // Now decode + Chunker::decode(chunks, &secure_channel, None) + } + + fn merge_chunks( + mut chunks: Vec, + ) -> Result, StatusCode> { + if chunks.len() == 1 { + return Ok(vec![MessageChunk { + data: chunks.pop().unwrap().data_with_header, + }]); + } + chunks.sort_by(|a, b| { + a.header + .sequence_header + .sequence_number + .cmp(&b.header.sequence_header.sequence_number) + }); + let mut ret = Vec::with_capacity(chunks.len()); + let mut expect_sequence_number = chunks + .get(0) + .unwrap() + .header + .sequence_header + .sequence_number; + for c in chunks { + if c.header.sequence_header.sequence_number != expect_sequence_number { + info!( + "receive wrong chunk expect seq={},got={}", + expect_sequence_number, c.header.sequence_header.sequence_number + ); + continue; //may be duplicate chunk + } + expect_sequence_number += 1; + ret.push(MessageChunk { + data: c.data_with_header, + }); + } + Ok(ret) + } + + /// Close the transport, aborting any pending requests. + /// If `status` is good, the pending requests will be terminated with + /// `BadConnectionClosed`. + pub async fn close(&mut self, status: StatusCode) -> StatusCode { + // If the status is good, we still want to send a bad status code + // to the pending requests. They didn't succeed, after all. + let request_status = if status.is_good() { + StatusCode::BadConnectionClosed + } else { + status + }; + + for (_, pending) in self.message_states.drain() { + let _ = pending.callback.send(Err(request_status)); + } + + // Make sure we also send a bad status for any remaining messages in the queue + // Close the channel first. + self.outgoing_recv.close(); + + // recv is no longer blocking. 
+ while let Some(msg) = self.outgoing_recv.recv().await { + if let Some(cb) = msg.callback { + let _ = cb.send(Err(request_status)); + } + } + + status + } +} diff --git a/lib/src/client/transport/mod.rs b/lib/src/client/transport/mod.rs new file mode 100644 index 000000000..575a6f5df --- /dev/null +++ b/lib/src/client/transport/mod.rs @@ -0,0 +1,9 @@ +mod buffer; +mod channel; +mod core; +mod state; +pub mod tcp; + +pub use channel::{AsyncSecureChannel, SecureChannelEventLoop}; +pub(crate) use core::OutgoingMessage; +pub use core::TransportPollResult; diff --git a/lib/src/client/transport/state.rs b/lib/src/client/transport/state.rs new file mode 100644 index 000000000..b3785e56d --- /dev/null +++ b/lib/src/client/transport/state.rs @@ -0,0 +1,215 @@ +use std::{ + sync::{atomic::AtomicU32, Arc}, + time::{Duration, Instant}, +}; + +use tokio::sync::mpsc::error::SendTimeoutError; + +use crate::{ + client::{session::process_unexpected_response, transport::OutgoingMessage}, + core::{ + comms::secure_channel::SecureChannel, handle::AtomicHandle, + supported_message::SupportedMessage, + }, + crypto::SecurityPolicy, + sync::RwLock, + types::{ + DateTime, DiagnosticBits, MessageSecurityMode, NodeId, OpenSecureChannelRequest, + RequestHeader, SecurityTokenRequestType, StatusCode, + }, +}; +use arc_swap::ArcSwap; + +pub type RequestSend = tokio::sync::mpsc::Sender; + +lazy_static! { + static ref NEXT_SESSION_ID: AtomicU32 = AtomicU32::new(1); +} + +pub struct SecureChannelState { + /// Time offset between the client and the server. + client_offset: ArcSwap, + /// Ignore clock skew between the client and the server. 
+ ignore_clock_skew: bool, + /// Secure channel information + secure_channel: Arc>, + /// The session authentication token, used for session activation + authentication_token: Arc>, + /// The next handle to assign to a request + request_handle: AtomicHandle, +} + +pub(super) struct Request { + payload: SupportedMessage, + sender: RequestSend, + timeout: std::time::Duration, +} + +impl Request { + pub fn new( + payload: impl Into, + sender: RequestSend, + timeout: Duration, + ) -> Self { + Self { + payload: payload.into(), + sender, + timeout, + } + } + + pub async fn send_no_response(self) -> Result<(), StatusCode> { + let message = OutgoingMessage { + request: self.payload, + callback: None, + deadline: Instant::now() + self.timeout, + }; + + match self.sender.send_timeout(message, self.timeout).await { + Ok(()) => Ok(()), + Err(SendTimeoutError::Closed(_)) => Err(StatusCode::BadConnectionClosed), + Err(SendTimeoutError::Timeout(_)) => Err(StatusCode::BadTimeout), + } + } + + pub async fn send(self) -> Result { + let (cb_send, cb_recv) = tokio::sync::oneshot::channel(); + + let message = OutgoingMessage { + request: self.payload, + callback: Some(cb_send), + deadline: Instant::now() + self.timeout, + }; + + match self.sender.send_timeout(message, self.timeout).await { + Ok(()) => (), + Err(SendTimeoutError::Closed(_)) => return Err(StatusCode::BadConnectionClosed), + Err(SendTimeoutError::Timeout(_)) => return Err(StatusCode::BadTimeout), + } + + match cb_recv.await { + Ok(r) => r, + // Should not really happen, would mean something paniced. 
+ Err(_) => Err(StatusCode::BadConnectionClosed), + } + } +} + +impl SecureChannelState { + const FIRST_REQUEST_HANDLE: u32 = 1; + + pub fn new( + ignore_clock_skew: bool, + secure_channel: Arc>, + authentication_token: Arc>, + ) -> Self { + SecureChannelState { + client_offset: ArcSwap::new(Arc::new(chrono::Duration::zero())), + ignore_clock_skew, + secure_channel, + authentication_token, + request_handle: AtomicHandle::new(Self::FIRST_REQUEST_HANDLE), + } + } + + pub(crate) fn begin_issue_or_renew_secure_channel( + &self, + request_type: SecurityTokenRequestType, + timeout: Duration, + sender: RequestSend, + ) -> Request { + trace!("issue_or_renew_secure_channel({:?})", request_type); + + const REQUESTED_LIFETIME: u32 = 60000; // TODO + + let (security_mode, security_policy, client_nonce) = { + let mut secure_channel = trace_write_lock!(self.secure_channel); + let client_nonce = secure_channel.security_policy().random_nonce(); + secure_channel.set_local_nonce(client_nonce.as_ref()); + ( + secure_channel.security_mode(), + secure_channel.security_policy(), + client_nonce, + ) + }; + + info!("Making secure channel request"); + info!("security_mode = {:?}", security_mode); + info!("security_policy = {:?}", security_policy); + + let requested_lifetime = REQUESTED_LIFETIME; + let request = OpenSecureChannelRequest { + request_header: self.make_request_header(timeout), + client_protocol_version: 0, + request_type, + security_mode, + client_nonce, + requested_lifetime, + }; + + Request::new(request, sender, timeout) + } + + pub fn set_client_offset(&self, offset: chrono::Duration) { + // This is not strictly speaking thread safe, but it doesn't really matter in this case, + // the assumption is that this is only called from a single thread at once. 
+ self.client_offset + .store(Arc::new(**self.client_offset.load() + offset)); + debug!("Client offset set to {}", self.client_offset); + } + + pub(crate) fn end_issue_or_renew_secure_channel( + &self, + response: SupportedMessage, + ) -> Result<(), StatusCode> { + if let SupportedMessage::OpenSecureChannelResponse(response) = response { + // Extract the security token from the response. + let mut security_token = response.security_token.clone(); + + // When ignoring clock skew, we calculate the time offset between the client and the + // server and use that offset to compensate for the difference in time when setting + // the timestamps in the request headers and when decoding timestamps in messages + // received from the server. + if self.ignore_clock_skew && !response.response_header.timestamp.is_null() { + let offset = response.response_header.timestamp - DateTime::now(); + // Make sure to apply the offset to the security token in the current response. + security_token.created_at = security_token.created_at - offset; + // Update the client offset by adding the new offset. When the secure channel is + // renewed it's already using the client offset calculated when issuing the secure + // channel and only needs to be updated to accommodate any additional clock skew. 
+ self.set_client_offset(offset); + } + + debug!("Setting transport's security token"); + { + let mut secure_channel = trace_write_lock!(self.secure_channel); + secure_channel.set_client_offset(**self.client_offset.load()); + secure_channel.set_security_token(security_token); + + if secure_channel.security_policy() != SecurityPolicy::None + && (secure_channel.security_mode() == MessageSecurityMode::Sign + || secure_channel.security_mode() == MessageSecurityMode::SignAndEncrypt) + { + secure_channel.set_remote_nonce_from_byte_string(&response.server_nonce)?; + secure_channel.derive_keys(); + } + } + Ok(()) + } else { + Err(process_unexpected_response(response)) + } + } + + /// Construct a request header for the session. All requests after create session are expected + /// to supply an authentication token. + pub fn make_request_header(&self, timeout: Duration) -> RequestHeader { + RequestHeader { + authentication_token: self.authentication_token.load().as_ref().clone(), + timestamp: DateTime::now_with_offset(**self.client_offset.load()), + request_handle: self.request_handle.next(), + return_diagnostics: DiagnosticBits::empty(), + timeout_hint: timeout.as_millis().min(u32::MAX as u128) as u32, + ..Default::default() + } + } +} diff --git a/lib/src/client/transport/tcp.rs b/lib/src/client/transport/tcp.rs new file mode 100644 index 000000000..fc2fc7aa1 --- /dev/null +++ b/lib/src/client/transport/tcp.rs @@ -0,0 +1,277 @@ +use std::sync::Arc; + +use super::buffer::SendBuffer; +use super::core::{OutgoingMessage, TransportPollResult, TransportState}; +use crate::core::comms::{ + secure_channel::SecureChannel, + tcp_codec::{Message, TcpCodec}, + tcp_types::HelloMessage, + url::hostname_port_from_url, +}; +use crate::core::supported_message::SupportedMessage; +use crate::types::{encoding::BinaryEncoder, StatusCode}; +use futures::StreamExt; +use parking_lot::RwLock; +use tokio::io::{AsyncWriteExt, ReadHalf, WriteHalf}; +use tokio::net::TcpStream; +use 
tokio_util::codec::FramedRead; + +#[derive(Debug, Clone, Copy)] +enum TransportCloseState { + Open, + Closing(StatusCode), + Closed(StatusCode), +} + +pub(crate) struct TcpTransport { + state: TransportState, + read: FramedRead, TcpCodec>, + write: WriteHalf, + send_buffer: SendBuffer, + should_close: bool, + closed: TransportCloseState, +} + +#[derive(Debug, Clone)] +pub struct TransportConfiguration { + pub max_pending_incoming: usize, + pub max_inflight: usize, + pub send_buffer_size: usize, + pub recv_buffer_size: usize, + pub max_message_size: usize, + pub max_chunk_count: usize, +} + +impl TcpTransport { + /// Attempt to establish a connection to the OPC UA endpoint given by `endpoint_url`. + /// Note that on success, this returns a `TcpTransport`. The caller is responsible for + /// calling `run` on the returned transport in order to actually send and receive messages. + pub async fn connect( + secure_channel: Arc>, + outgoing_recv: tokio::sync::mpsc::Receiver, + config: TransportConfiguration, + endpoint_url: &str, + ) -> Result { + let (framed_read, writer) = + match Self::connect_inner(&secure_channel, &config, endpoint_url).await { + Ok(k) => k, + Err(status) => return Err(status), + }; + + Ok(Self { + state: TransportState::new( + secure_channel, + outgoing_recv, + config.max_pending_incoming, + config.max_inflight, + ), + read: framed_read, + write: writer, + send_buffer: SendBuffer::new( + config.send_buffer_size, + config.max_message_size, + config.max_chunk_count, + ), + should_close: false, + closed: TransportCloseState::Open, + }) + } + + async fn connect_inner( + secure_channel: &RwLock, + config: &TransportConfiguration, + endpoint_url: &str, + ) -> Result< + ( + FramedRead, TcpCodec>, + WriteHalf, + ), + StatusCode, + > { + let (host, port) = hostname_port_from_url( + endpoint_url, + crate::core::constants::DEFAULT_OPC_UA_SERVER_PORT, + )?; + + let addr = { + let addr = format!("{}:{}", host, port); + match tokio::net::lookup_host(addr).await { 
+ Ok(mut addrs) => { + if let Some(addr) = addrs.next() { + addr + } else { + error!( + "Invalid address {}, does not resolve to any socket", + endpoint_url + ); + return Err(StatusCode::BadTcpEndpointUrlInvalid); + } + } + Err(e) => { + error!("Invalid address {}, cannot be parsed {:?}", endpoint_url, e); + return Err(StatusCode::BadTcpEndpointUrlInvalid); + } + } + }; + + debug!("Connecting to {} with url {}", addr, endpoint_url); + + let socket = TcpStream::connect(&addr).await.map_err(|err| { + error!("Could not connect to host {}, {:?}", addr, err); + StatusCode::BadCommunicationError + })?; + + let (reader, mut writer) = tokio::io::split(socket); + + let hello = HelloMessage::new( + &endpoint_url, + config.send_buffer_size, + config.recv_buffer_size, + config.max_message_size, + config.max_chunk_count, + ); + let mut framed_read = { + let secure_channel = trace_read_lock!(secure_channel); + FramedRead::new(reader, TcpCodec::new(secure_channel.decoding_options())) + }; + + writer + .write_all(&hello.encode_to_vec()) + .await + .map_err(|err| { + error!("Cannot send hello to server, err = {:?}", err); + StatusCode::BadCommunicationError + })?; + match framed_read.next().await { + Some(Ok(Message::Acknowledge(ack))) => { + // TODO revise our sizes and other things according to the ACK + log::trace!("Received acknowledgement: {:?}", ack); + } + other => { + error!( + "Unexpected error while waiting for server ACK. 
Expected ACK, got {:?}", + other + ); + return Err(StatusCode::BadConnectionClosed); + } + } + + Ok((framed_read, writer)) + } + + fn handle_incoming_message( + &mut self, + incoming: Option>, + ) -> TransportPollResult { + let Some(incoming) = incoming else { + return TransportPollResult::Closed(StatusCode::BadCommunicationError); + }; + match incoming { + Ok(message) => { + if let Err(e) = self.state.handle_incoming_message(message) { + TransportPollResult::Closed(e) + } else { + TransportPollResult::IncomingMessage + } + } + Err(err) => { + error!("Error reading from stream {:?}", err); + TransportPollResult::Closed(StatusCode::BadConnectionClosed) + } + } + } + + async fn poll_inner(&mut self) -> TransportPollResult { + // Either we've got something in the send buffer, which we can send, + // or we're waiting for more outgoing messages. + // We won't wait for outgoing messages while sending, since that + // could cause the send buffer to fill up. + + // If there's nothing in the send buffer, but there are chunks available, + // write them to the send buffer before proceeding. + if self.send_buffer.should_encode_chunks() { + let secure_channel = trace_read_lock!(self.state.secure_channel); + if let Err(e) = self.send_buffer.encode_next_chunk(&secure_channel) { + return TransportPollResult::Closed(e); + } + } + + // If there is something in the send buffer, write to the stream. + // If not, wait for outgoing messages. + // Either way, listen to incoming messages while we do this. + if self.send_buffer.can_read() { + tokio::select! 
{ + r = self.send_buffer.read_into_async(&mut self.write) => { + if let Err(e) = r { + error!("write bytes task failed: {}", e); + return TransportPollResult::Closed(StatusCode::BadCommunicationError); + } + TransportPollResult::OutgoingMessageSent + } + incoming = self.read.next() => { + self.handle_incoming_message(incoming) + } + } + } else { + if self.should_close { + debug!("Writer is setting the connection state to finished(good)"); + return TransportPollResult::Closed(StatusCode::Good); + } + tokio::select! { + outgoing = self.state.wait_for_outgoing_message(&mut self.send_buffer) => { + let Some((outgoing, request_id)) = outgoing else { + return TransportPollResult::Closed(StatusCode::Good); + }; + let close_connection = + matches!(outgoing, SupportedMessage::CloseSecureChannelRequest(_)); + if close_connection { + self.should_close = true; + debug!("Writer is about to send a CloseSecureChannelRequest which means it should close in a moment"); + } + let secure_channel = trace_read_lock!(self.state.secure_channel); + if let Err(e) = self.send_buffer.write(request_id, outgoing, &secure_channel) { + TransportPollResult::Closed(e) + } else { + TransportPollResult::OutgoingMessage + } + } + incoming = self.read.next() => { + self.handle_incoming_message(incoming) + } + } + } + } + + pub async fn poll(&mut self) -> TransportPollResult { + // We want poll to be cancel safe, this means that if we stop polling + // a future returned from poll, we do not lose data or get in an + // inconsistent state. + // `poll_inner` is cancel safe, because all the async methods it + // calls are cancel safe, and it only ever finishes one future. + // The only thing that isn't cancel safe is when we close the channel. + // `close` can be called multiple times, and will continue where it left off, + // so all we have to do is keep calling close until we manage to complete it, + // and _then_ we can set the state to `closed`. 
+ match self.closed { + TransportCloseState::Open => {} + TransportCloseState::Closing(c) => { + // Close is kind-of cancel safe, in that + // calling it multiple times is safe. + let r = self.state.close(c).await; + self.closed = TransportCloseState::Closed(c); + return TransportPollResult::Closed(r); + } + TransportCloseState::Closed(c) => { + return TransportPollResult::Closed(c); + } + } + + let r = self.poll_inner().await; + if let TransportPollResult::Closed(status) = &r { + self.closed = TransportCloseState::Closing(*status); + let r = self.state.close(*status).await; + self.closed = TransportCloseState::Closed(r); + } + r + } +} diff --git a/lib/src/core/comms/secure_channel.rs b/lib/src/core/comms/secure_channel.rs index 572c2c99c..99a9905f6 100644 --- a/lib/src/core/comms/secure_channel.rs +++ b/lib/src/core/comms/secure_channel.rs @@ -590,7 +590,8 @@ impl SecureChannel { } else { let size = message_chunk.data.len(); if size > dst.len() { - panic!("The size of the message chunk {} exceeds the size of the destination buffer {}", size, dst.len()) + error!("The size of the message chunk {} exceeds the size of the destination buffer {}", size, dst.len()); + return Err(StatusCode::BadEncodingLimitsExceeded); } dst[..size].copy_from_slice(&message_chunk.data[..]); size diff --git a/lib/src/core/handle.rs b/lib/src/core/handle.rs index fe967939e..8ed8e24ff 100644 --- a/lib/src/core/handle.rs +++ b/lib/src/core/handle.rs @@ -2,7 +2,10 @@ // SPDX-License-Identifier: MPL-2.0 // Copyright (C) 2017-2024 Adam Lock -use std::u32; +use std::{ + sync::atomic::{AtomicU32, Ordering}, + u32, +}; /// A simple handle factory for incrementing sequences of numbers. 
#[derive(Debug, Clone, Serialize)] @@ -40,6 +43,56 @@ impl Handle { } } +/// Variant of the handle factory using atomics +pub struct AtomicHandle { + next: AtomicU32, + first: u32, +} + +impl AtomicHandle { + pub fn new(first: u32) -> Self { + Self { + next: AtomicU32::new(first), + first, + } + } + + pub fn next(&self) -> u32 { + let mut val = self.next.fetch_add(1, Ordering::Acquire); + + while val < self.first { + // On overflow, try to reset the next value to first + 1 + match self.next.compare_exchange( + val + 1, + self.first + 1, + Ordering::Release, + Ordering::SeqCst, + ) { + // If it succeeds, just use first directly. + Ok(_) => val = self.first, + Err(v) => { + if v >= self.first { + val = self.next.fetch_add(1, Ordering::Acquire); + } else { + val = v; + } + } + } + } + val + } + + pub fn set_next(&self, next: u32) { + debug_assert!(next >= self.first); + self.next.store(next, Ordering::Relaxed); + } + + /// Resets the handle to its initial state + pub fn reset(&self) { + self.set_next(self.first); + } +} + #[test] fn handle_increment() { // Expect sequential handles @@ -61,3 +114,25 @@ fn handle_wrap() { assert_eq!(h.next(), u32::MAX); assert_eq!(h.next(), u32::MAX - 2); } + +#[test] +fn atomic_handle_increment() { + // Expect sequential handles + let h = AtomicHandle::new(0); + assert_eq!(h.next(), 0); + assert_eq!(h.next(), 1); + assert_eq!(h.next(), 2); + let h = AtomicHandle::new(100); + assert_eq!(h.next(), 100); + assert_eq!(h.next(), 101); +} + +#[test] +fn atomic_handle_wrap() { + // Simulate wrapping around + let h = AtomicHandle::new(u32::MAX - 2); + assert_eq!(h.next(), u32::MAX - 2); + assert_eq!(h.next(), u32::MAX - 1); + assert_eq!(h.next(), u32::MAX); + assert_eq!(h.next(), u32::MAX - 2); +} diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 3f5f15752..bf689fa06 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -117,9 +117,6 @@ fn from_hex(v: &str) -> Vec { } mod prelude { - #[cfg(feature = "client")] - pub use 
crate::client::prelude::*; - pub use crate::core::prelude::*; #[cfg(feature = "server")] pub use crate::server::prelude::*; } diff --git a/lib/src/server/discovery/mod.rs b/lib/src/server/discovery/mod.rs index 885fdf9ce..20e3e1b38 100644 --- a/lib/src/server/discovery/mod.rs +++ b/lib/src/server/discovery/mod.rs @@ -2,9 +2,11 @@ // SPDX-License-Identifier: MPL-2.0 // Copyright (C) 2017-2024 Adam Lock -use crate::client::prelude::ClientBuilder; +use std::path::PathBuf; -use crate::server::state::ServerState; +use crate::client::ClientBuilder; + +use super::prelude::RegisteredServer; // Note these two functions are presently informational, but in the future they could // be used to automatically set up trust between LDS and server if the server @@ -36,18 +38,21 @@ fn linux_lds_pki_dir() -> String { } /// Registers the specified endpoints with the specified discovery server -pub fn register_with_discovery_server(discovery_server_url: &str, server_state: &ServerState) { +pub async fn register_with_discovery_server( + discovery_server_url: &str, + registered_server: RegisteredServer, + pki_dir: PathBuf, +) { debug!( "register_with_discovery_server, for {}", discovery_server_url ); - let server_config = trace_read_lock!(server_state.config); // Create a client, ensuring to retry only once let client = ClientBuilder::new() .application_name("DiscoveryClient") .application_uri("urn:DiscoveryClient") - .pki_dir(server_config.pki_dir.clone()) + .pki_dir(pki_dir) .session_retry_limit(1) .client(); @@ -56,12 +61,14 @@ pub fn register_with_discovery_server(discovery_server_url: &str, server_state: // find_servers on it first. 
// Connect to the server and call find_servers to ensure it is a discovery server - match client.find_servers(discovery_server_url) { + match client.find_servers(discovery_server_url).await { Ok(servers) => { debug!("Servers on the discovery endpoint - {:?}", servers); // Register the server - let registered_server = server_state.registered_server(); - match client.register_server(discovery_server_url, registered_server) { + match client + .register_server(discovery_server_url, registered_server) + .await + { Ok(_) => {} Err(err) => { error!( diff --git a/lib/src/server/server.rs b/lib/src/server/server.rs index 8a66b030f..7475f81b0 100644 --- a/lib/src/server/server.rs +++ b/lib/src/server/server.rs @@ -4,7 +4,7 @@ //! Provides the [`Server`] type and functionality related to it. -use std::{marker::Sync, net::SocketAddr, panic::AssertUnwindSafe, sync::Arc}; +use std::{marker::Sync, net::SocketAddr, sync::Arc}; use tokio::{ self, @@ -555,23 +555,27 @@ impl Server { let mut last_registered = trace_lock!(last_registered); if now.duration_since(*last_registered) >= register_duration { *last_registered = now; - // Even though the client uses tokio internally, the client's API is synchronous - // so the registration will happen on its own thread. The expectation is that - // it will run and either succeed, or it will fail but either way the operation - // will have completed before the next timer fires. 
- let server_state = server_state.clone(); - let discovery_server_url = discovery_server_url.clone(); - let _ = std::thread::spawn(move || { - let _ = std::panic::catch_unwind(AssertUnwindSafe(move || { - let server_state = trace_read_lock!(server_state); - if server_state.is_running() { - discovery::register_with_discovery_server( - &discovery_server_url, - &server_state, - ); - } - })); - }); + drop(last_registered); + let (is_running, pki_dir, registered_server) = { + let server_state = trace_read_lock!(server_state); + let pki_dir = { + let config = server_state.config.read(); + config.pki_dir.clone() + }; + ( + server_state.is_running(), + pki_dir, + server_state.registered_server(), + ) + }; + if is_running { + discovery::register_with_discovery_server( + &discovery_server_url, + registered_server, + pki_dir, + ) + .await; + } } } info!("Discovery timer task is finished"); diff --git a/lib/src/server/services/message_handler.rs b/lib/src/server/services/message_handler.rs index c4eb238ee..35565e81e 100644 --- a/lib/src/server/services/message_handler.rs +++ b/lib/src/server/services/message_handler.rs @@ -477,7 +477,9 @@ impl MessageHandler { let mut session = trace_write_lock!(session); let last_service_request_timestamp = session.last_service_request_timestamp(); let elapsed = now - last_service_request_timestamp; - if elapsed.num_milliseconds() as f64 > session.session_timeout() { + if elapsed.num_milliseconds() as f64 > session.session_timeout() + && session.session_timeout() > 0.0 + { session.terminate_session(); error!("Session has timed out because too much time has elapsed between service calls - elapsed time = {}ms", elapsed.num_milliseconds()); Err(ServiceFault::new(request_header, StatusCode::BadSessionIdInvalid).into()) diff --git a/lib/src/types/encoding.rs b/lib/src/types/encoding.rs index a327d001e..4453d1fa9 100644 --- a/lib/src/types/encoding.rs +++ b/lib/src/types/encoding.rs @@ -8,16 +8,16 @@ use std::{ fmt::Debug, io::{Cursor, Read, 
Result, Write}, - sync::Arc, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, }; use byteorder::{ByteOrder, LittleEndian, WriteBytesExt}; use chrono::Duration; -use crate::{ - sync::Mutex, - types::{constants, status_codes::StatusCode}, -}; +use crate::types::{constants, status_codes::StatusCode}; pub type EncodingResult = std::result::Result; @@ -25,34 +25,37 @@ pub type EncodingResult = std::result::Result; /// decremented even if there is a panic unwind. #[derive(Debug)] pub struct DepthLock { - depth_gauge: Arc>, + depth_gauge: Arc, } impl Drop for DepthLock { fn drop(&mut self) { - let mut dg = trace_lock!(self.depth_gauge); - if dg.current_depth > 0 { - dg.current_depth -= 1; - } - // panic if current_depth == 0 is probably overkill and might have issues when drop - // is called from a panic. + // This will overflow back if the gauge is somehow at 0. That really should not be possible, if it is only ever + // incremented from `obtain` + self.depth_gauge + .current_depth + .fetch_sub(1, Ordering::Release); } } impl DepthLock { + fn new(depth_gauge: Arc) -> (Self, u64) { + let current = depth_gauge.current_depth.fetch_add(1, Ordering::Acquire); + + (Self { depth_gauge }, current) + } + /// The depth lock tests if the depth can increment and then obtains a lock on it. /// The lock will decrement the depth when it drops to ensure proper behaviour during unwinding. 
- pub fn obtain( - depth_gauge: Arc>, - ) -> core::result::Result { - let mut dg = trace_lock!(depth_gauge); - if dg.current_depth >= dg.max_depth { + pub fn obtain(depth_gauge: Arc) -> core::result::Result { + let max_depth = depth_gauge.max_depth; + let (gauge, val) = Self::new(depth_gauge); + + if val >= max_depth { warn!("Decoding in stream aborted due maximum recursion depth being reached"); Err(StatusCode::BadDecodingError) } else { - dg.current_depth += 1; - drop(dg); - Ok(Self { depth_gauge }) + Ok(gauge) } } } @@ -62,33 +65,34 @@ impl DepthLock { #[derive(Debug)] pub struct DepthGauge { /// Maximum decoding depth for recursive elements. Triggers when current depth equals max depth. - pub(crate) max_depth: usize, + pub(self) max_depth: u64, /// Current decoding depth for recursive elements. - pub(crate) current_depth: usize, + pub(self) current_depth: AtomicU64, } impl Default for DepthGauge { fn default() -> Self { - Self { - max_depth: constants::MAX_DECODING_DEPTH, - current_depth: 0, - } + Self::new(constants::MAX_DECODING_DEPTH) } } impl DepthGauge { + pub fn new(max_depth: u64) -> Self { + Self { + max_depth, + current_depth: AtomicU64::new(0), + } + } + pub fn minimal() -> Self { Self { max_depth: 1, ..Default::default() } } - pub fn max_depth(&self) -> usize { + pub fn max_depth(&self) -> u64 { self.max_depth } - pub fn current_depth(&self) -> usize { - self.current_depth - } } #[derive(Clone, Debug)] @@ -107,7 +111,7 @@ pub struct DecodingOptions { /// Maximum number of array elements. 0 actually means 0, i.e. 
no array permitted pub max_array_length: usize, /// Decoding depth gauge is used to check for recursion - pub decoding_depth_gauge: Arc>, + pub decoding_depth_gauge: Arc, } impl Default for DecodingOptions { @@ -119,7 +123,7 @@ impl Default for DecodingOptions { max_string_length: constants::MAX_STRING_LENGTH, max_byte_string_length: constants::MAX_BYTE_STRING_LENGTH, max_array_length: constants::MAX_ARRAY_LENGTH, - decoding_depth_gauge: Arc::new(Mutex::new(DepthGauge::default())), + decoding_depth_gauge: Arc::new(DepthGauge::default()), } } } @@ -132,7 +136,7 @@ impl DecodingOptions { max_string_length: 8192, max_byte_string_length: 8192, max_array_length: 8192, - decoding_depth_gauge: Arc::new(Mutex::new(DepthGauge::minimal())), + decoding_depth_gauge: Arc::new(DepthGauge::minimal()), ..Default::default() } } @@ -419,3 +423,51 @@ pub fn read_f64(stream: &mut dyn Read) -> EncodingResult { process_decode_io_result(result)?; Ok(LittleEndian::read_f64(&buf)) } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use super::{constants, DepthGauge, DepthLock}; + use crate::types::StatusCode; + + #[test] + fn depth_gauge() { + let dg = Arc::new(DepthGauge::default()); + + let max_depth = dg.max_depth(); + assert_eq!(max_depth, constants::MAX_DECODING_DEPTH); + + // Iterate the depth + { + let mut v = Vec::new(); + for _ in 0..max_depth { + v.push(DepthLock::obtain(dg.clone()).unwrap()); + } + + // Depth should now be MAX_DECODING_DEPTH + { + assert_eq!( + dg.current_depth.load(std::sync::atomic::Ordering::Relaxed), + max_depth + ); + } + + // Next obtain should fail + assert_eq!( + DepthLock::obtain(dg.clone()).unwrap_err(), + StatusCode::BadDecodingError + ); + + // DepthLocks drop here + } + + // Depth should be zero + { + assert_eq!( + dg.current_depth.load(std::sync::atomic::Ordering::Relaxed), + 0 + ); + } + } +} diff --git a/lib/src/types/mod.rs b/lib/src/types/mod.rs index b8495f496..80ae3b1ab 100644 --- a/lib/src/types/mod.rs +++ b/lib/src/types/mod.rs @@ 
-44,7 +44,7 @@ pub mod constants { /// Default maximum decoding depth for recursive data structures, i.e. if data is nested deeper than this it is /// an error during decoding. This is a security measure to stop deeply nested junk being sent to /// a server / client. - pub const MAX_DECODING_DEPTH: usize = 10; + pub const MAX_DECODING_DEPTH: u64 = 10; /// URI supplied for the None security policy pub const SECURITY_POLICY_NONE_URI: &str = "http://opcfoundation.org/UA/SecurityPolicy#None"; /// String used as shorthand in config files, debug etc.for `None` security policy diff --git a/lib/src/types/qualified_name.rs b/lib/src/types/qualified_name.rs index 42af9efe9..d57efe2e5 100644 --- a/lib/src/types/qualified_name.rs +++ b/lib/src/types/qualified_name.rs @@ -32,6 +32,12 @@ pub struct QualifiedName { pub name: UAString, } +impl Default for QualifiedName { + fn default() -> Self { + Self::null() + } +} + impl<'a> From<&'a str> for QualifiedName { fn from(value: &'a str) -> Self { Self { diff --git a/lib/src/types/service_types/read_value_id.rs b/lib/src/types/service_types/read_value_id.rs index b469ba705..ca2734442 100644 --- a/lib/src/types/service_types/read_value_id.rs +++ b/lib/src/types/service_types/read_value_id.rs @@ -13,7 +13,7 @@ use crate::types::{ }; use std::io::{Read, Write}; -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)] #[serde(rename_all = "PascalCase")] pub struct ReadValueId { pub node_id: NodeId, diff --git a/lib/src/types/tests/encoding.rs b/lib/src/types/tests/encoding.rs index ec01f04de..c35bfbc92 100644 --- a/lib/src/types/tests/encoding.rs +++ b/lib/src/types/tests/encoding.rs @@ -1,4 +1,3 @@ -use parking_lot::Mutex; use std::sync::Arc; use std::{io::Cursor, str::FromStr}; @@ -535,10 +534,7 @@ fn null_array() -> EncodingResult<()> { #[test] fn deep_encoding() { let decoding_options = DecodingOptions { - decoding_depth_gauge: Arc::new(Mutex::new(DepthGauge { - 
max_depth: 2, - current_depth: 0, - })), + decoding_depth_gauge: Arc::new(DepthGauge::new(2)), ..Default::default() }; @@ -558,42 +554,3 @@ fn deep_encoding() { let res = Variant::decode(&mut stream, &decoding_options); assert_eq!(res.unwrap_err(), StatusCode::BadDecodingError); } - -#[test] -fn depth_gauge() { - let dg = Arc::new(Mutex::new(DepthGauge::default())); - - let max_depth = { - let dg = trace_lock!(dg); - dg.max_depth() - }; - assert_eq!(max_depth, constants::MAX_DECODING_DEPTH); - - // Iterate the depth - { - let mut v = Vec::new(); - for _ in 0..max_depth { - v.push(DepthLock::obtain(dg.clone()).unwrap()); - } - - // Depth should now be MAX_DECODING_DEPTH - { - let dg = trace_lock!(dg); - assert_eq!(dg.current_depth(), max_depth); - } - - // Next obtain should fail - assert_eq!( - DepthLock::obtain(dg.clone()).unwrap_err(), - StatusCode::BadDecodingError - ); - - // DepthLocks drop here - } - - // Depth should be zero - { - let dg = trace_lock!(dg); - assert_eq!(dg.current_depth(), 0); - } -} diff --git a/samples/client.conf b/samples/client.conf index e6239a2fd..8fb8f9b07 100644 --- a/samples/client.conf +++ b/samples/client.conf @@ -40,13 +40,34 @@ endpoints: decoding_options: max_message_size: 327675 max_chunk_count: 5 + max_chunk_size: 65535 + max_incoming_chunk_size: 65535 max_string_length: 65535 max_byte_string_length: 65535 max_array_length: 1000 session_retry_limit: 10 -session_retry_interval: 10000 +session_retry_initial: + secs: 1 + nanos: 0 +session_retry_max: + secs: 30 + nanos: 0 +keep_alive_interval: + secs: 10 + nanos: 0 +request_timeout: + secs: 60 + nanos: 0 +publish_timeout: + secs: 60 + nanos: 0 +min_publish_interval: + secs: 1 + nanos: 0 +max_inflight_publish: 2 session_timeout: 0 performance: ignore_clock_skew: false - single_threaded_executor: true + recreate_monitored_items_chunk: 1000 + max_inflight_messages: 20 session_name: Rust OPC UA Client diff --git a/samples/discovery-client/Cargo.toml 
b/samples/discovery-client/Cargo.toml index b40e5d3fe..8b30e161c 100644 --- a/samples/discovery-client/Cargo.toml +++ b/samples/discovery-client/Cargo.toml @@ -6,6 +6,7 @@ edition = "2021" [dependencies] pico-args = "0.5" +tokio = { version = "1.36.0", features = ["full"] } [dependencies.opcua] path = "../../lib" diff --git a/samples/discovery-client/src/main.rs b/samples/discovery-client/src/main.rs index bda4f44df..01be91dd6 100644 --- a/samples/discovery-client/src/main.rs +++ b/samples/discovery-client/src/main.rs @@ -3,9 +3,14 @@ // Copyright (C) 2017-2024 Adam Lock //! This is a sample that calls find servers on a OPC UA discovery server + use std::str::FromStr; -use opcua::client::prelude::*; +use opcua::{ + client::{Client, ClientConfig}, + core::comms::url::is_opc_ua_binary_url, + crypto::SecurityPolicy, +}; struct Args { help: bool, @@ -36,7 +41,8 @@ Usage: const DEFAULT_DISCOVERY_URL: &str = "opc.tcp://localhost:4840/"; -fn main() -> Result<(), ()> { +#[tokio::main] +async fn main() -> Result<(), ()> { let args = Args::parse_args().map_err(|_| Args::usage())?; if args.help { Args::usage(); @@ -50,20 +56,20 @@ fn main() -> Result<(), ()> { // The client API has a simple `find_servers` function that connects and returns servers for us. 
let mut client = Client::new(ClientConfig::new("DiscoveryClient", "urn:DiscoveryClient")); - match client.find_servers(url) { + match client.find_servers(url).await { Ok(servers) => { println!("Discovery server responded with {} servers:", servers.len()); - servers.iter().for_each(|server| { + for server in servers { // Each server is an `ApplicationDescription` println!("Server : {}", server.application_name); if let Some(ref discovery_urls) = server.discovery_urls { - discovery_urls.iter().for_each(|discovery_url| { - print_server_endpoints(discovery_url.as_ref()) - }); + for discovery_url in discovery_urls { + print_server_endpoints(discovery_url.as_ref()).await + } } else { println!(" No discovery urls for this server"); } - }); + } } Err(err) => { println!( @@ -76,7 +82,7 @@ fn main() -> Result<(), ()> { Ok(()) } -fn print_server_endpoints(discovery_url: &str) { +async fn print_server_endpoints(discovery_url: &str) { println!(" {}", discovery_url); if is_opc_ua_binary_url(discovery_url) { // Try to talk with it and get some endpoints @@ -84,7 +90,7 @@ fn print_server_endpoints(discovery_url: &str) { let client = Client::new(client_config); // Ask the server associated with the default endpoint for its list of endpoints - match client.get_server_endpoints_from_url(discovery_url) { + match client.get_server_endpoints_from_url(discovery_url).await { Result::Ok(endpoints) => { println!(" Server has these endpoints:"); endpoints.iter().for_each(|e| { diff --git a/samples/event-client/Cargo.toml b/samples/event-client/Cargo.toml index 3992235c7..071fc9369 100644 --- a/samples/event-client/Cargo.toml +++ b/samples/event-client/Cargo.toml @@ -6,6 +6,7 @@ edition = "2021" [dependencies] pico-args = "0.5" +tokio = { version = "1.36.0", features = ["full"] } [dependencies.opcua] path = "../../lib" diff --git a/samples/event-client/src/main.rs b/samples/event-client/src/main.rs index d562f2a3c..16c963748 100644 --- a/samples/event-client/src/main.rs +++ 
b/samples/event-client/src/main.rs @@ -9,9 +9,15 @@ //! 3. Subscribe to values and loop forever printing out their values use std::str::FromStr; use std::sync::Arc; +use std::time::Duration; -use opcua::client::prelude::*; -use opcua::sync::RwLock; +use opcua::client::{ClientBuilder, EventCallback, IdentityToken, Session}; +use opcua::crypto::SecurityPolicy; +use opcua::types::{ + AttributeId, ContentFilter, EventFilter, ExtensionObject, MessageSecurityMode, + MonitoredItemCreateRequest, NodeId, ObjectId, ObjectTypeId, QualifiedName, + SimpleAttributeOperand, StatusCode, TimestampsToReturn, UAString, UserTokenPolicy, +}; struct Args { help: bool, @@ -54,27 +60,30 @@ const DEFAULT_URL: &str = "opc.tcp://localhost:4855"; const DEFAULT_EVENT_SOURCE: &str = "i=2253"; const DEFAULT_EVENT_FIELDS: &str = "EventId,EventType,Message"; -fn main() -> Result<(), ()> { +#[tokio::main] +async fn main() -> Result<(), ()> { // Read command line arguments let args = Args::parse_args().map_err(|_| Args::usage())?; if args.help { Args::usage(); - } else { - // Optional - enable OPC UA logging - opcua::console_logging::init(); - - // Make the client configuration - let mut client = ClientBuilder::new() - .application_name("Simple Client") - .application_uri("urn:SimpleClient") - .product_uri("urn:SimpleClient") - .trust_server_certs(true) - .create_sample_keypair(true) - .session_retry_limit(3) - .client() - .unwrap(); - - if let Ok(session) = client.connect_to_endpoint( + return Ok(()); + } + // Optional - enable OPC UA logging + opcua::console_logging::init(); + + // Make the client configuration + let mut client = ClientBuilder::new() + .application_name("Simple Client") + .application_uri("urn:SimpleClient") + .product_uri("urn:SimpleClient") + .trust_server_certs(true) + .create_sample_keypair(true) + .session_retry_limit(3) + .client() + .unwrap(); + + let (session, event_loop) = client + .new_session_from_endpoint( ( args.url.as_ref(), SecurityPolicy::None.to_str(), @@ -82,52 
+91,60 @@ fn main() -> Result<(), ()> { UserTokenPolicy::anonymous(), ), IdentityToken::Anonymous, - ) { - if let Err(result) = - subscribe_to_events(session.clone(), &args.event_source, &args.event_fields) - { - println!( - "ERROR: Got an error while subscribing to variables - {}", - result - ); - } else { - // Loops forever. The publish thread will call the callback with changes on the variables - let _ = Session::run(session); - } - } + ) + .await + .unwrap(); + + let handle = event_loop.spawn(); + session.wait_for_connection().await; + + if let Err(result) = + subscribe_to_events(session.clone(), &args.event_source, &args.event_fields).await + { + println!( + "ERROR: Got an error while subscribing to variables - {}", + result + ); + let _ = session.disconnect().await; } + + handle.await.unwrap(); + Ok(()) } -fn subscribe_to_events( - session: Arc>, +async fn subscribe_to_events( + session: Arc, event_source: &str, event_fields: &str, ) -> Result<(), StatusCode> { - let session = session.read(); - let event_fields: Vec = event_fields.split(',').map(|s| s.into()).collect(); let event_callback = { let event_fields = event_fields.clone(); - EventCallback::new(move |events| { + EventCallback::new(move |event, _item| { // Handle events println!("Event from server:"); - if let Some(ref events) = events.events { - events.iter().for_each(|e| { - if let Some(ref event_values) = e.event_fields { - event_values.iter().enumerate().for_each(|(idx, field)| { - println!(" {}: {}", event_fields[idx], field); - }); - } + if let Some(ref event_values) = event { + event_values.iter().enumerate().for_each(|(idx, field)| { + println!(" {}: {}", event_fields[idx], field); }); } }) }; // Creates a subscription with an event callback - let subscription_id = - session.create_subscription(100.0, 12000, 50, 65535, 0, true, event_callback)?; + let subscription_id = session + .create_subscription( + Duration::from_millis(100), + 12000, + 50, + 65535, + 0, + true, + event_callback, + ) + 
.await?; println!("Created a subscription with id = {}", subscription_id); // Create monitored item on an event @@ -167,11 +184,14 @@ fn subscribe_to_events( ObjectId::EventFilter_Encoding_DefaultBinary, &event_filter, ); - if let Ok(result) = session.create_monitored_items( - subscription_id, - TimestampsToReturn::Neither, - &vec![item_to_create], - ) { + if let Ok(result) = session + .create_monitored_items( + subscription_id, + TimestampsToReturn::Neither, + vec![item_to_create], + ) + .await + { println!("Result of subscribing to event = {:?}", result); } else { println!("Cannot create monitored event!"); diff --git a/samples/mqtt-client/Cargo.toml b/samples/mqtt-client/Cargo.toml index c21e1bc42..1c6111264 100644 --- a/samples/mqtt-client/Cargo.toml +++ b/samples/mqtt-client/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" [dependencies] pico-args = "0.5" rumqttc = "0.23" +tokio = { version = "1.36.0", features = ["full"] } [dependencies.opcua] path = "../../lib" diff --git a/samples/mqtt-client/src/main.rs b/samples/mqtt-client/src/main.rs index b02c98d51..9bb6b3925 100644 --- a/samples/mqtt-client/src/main.rs +++ b/samples/mqtt-client/src/main.rs @@ -4,17 +4,17 @@ //! This is a sample OPC UA Client that connects to the specified server, fetches some //! values before exiting. -use std::{ - path::PathBuf, - sync::{mpsc, Arc}, - thread, - time::Duration, -}; +use std::{path::PathBuf, sync::Arc, time::Duration}; -use rumqttc::{Client as MqttClient, MqttOptions, QoS}; +use rumqttc::{AsyncClient as MqttClient, MqttOptions, QoS}; -use opcua::client::prelude::*; -use opcua::sync::{Mutex, RwLock}; +use opcua::{ + client::{Client, ClientConfig, DataChangeCallback, Session}, + core::config::Config, + sync::Mutex, + types::{DataValue, MonitoredItemCreateRequest, NodeId, StatusCode, TimestampsToReturn}, +}; +use tokio::{select, sync::mpsc}; struct Args { help: bool, @@ -70,29 +70,35 @@ const DEFAULT_MQTT_PORT: u16 = 1883; // 4. 
Publish those values to an MQTT broker (default broker.hivemq.com:1883) // 5. User can observe result on the broker (e.g. http://www.mqtt-dashboard.com/) -fn main() -> Result<(), ()> { +#[tokio::main] +async fn main() -> Result<(), ()> { let args = Args::parse_args().map_err(|_| Args::usage())?; if args.help { Args::usage(); - } else { - let mqtt_host = args.host; - let mqtt_port = args.port; - let config_file = args.config; - let endpoint_id = args.endpoint_id; - - // Optional - enable OPC UA logging - opcua::console_logging::init(); - - // The way this will work is the mqtt connection will live in its own thread, listening for - // events that are sent to it. - let (tx, rx) = mpsc::channel::<(NodeId, DataValue)>(); - let _ = thread::spawn(move || { - let mut mqtt_options = MqttOptions::new("test-id", mqtt_host, mqtt_port); - mqtt_options.set_keep_alive(Duration::from_secs(5)); - let (mut mqtt_client, _) = MqttClient::new(mqtt_options, 10); - - loop { - let (node_id, data_value) = rx.recv().unwrap(); + return Ok(()); + } + let mqtt_host = args.host; + let mqtt_port = args.port; + let config_file = args.config; + let endpoint_id = args.endpoint_id; + + // Optional - enable OPC UA logging + opcua::console_logging::init(); + + // The way this will work is the mqtt connection will live in its own thread, listening for + // events that are sent to it. + let (tx, mut rx) = mpsc::unbounded_channel::<(NodeId, DataValue)>(); + let _mqtt_handle = tokio::task::spawn(async move { + let mut mqtt_options = MqttOptions::new("test-id", mqtt_host, mqtt_port); + mqtt_options.set_keep_alive(Duration::from_secs(5)); + let (mqtt_client, mut event_loop) = MqttClient::new(mqtt_options, 10); + + select! 
{ + _ = event_loop.poll() => {}, + r = rx.recv() => { + let Some((node_id, data_value)) = r else { + return; + }; let topic = format!( "opcua-rust/mqtt-client/{}/{}", node_id.namespace, node_id.identifier @@ -105,77 +111,69 @@ fn main() -> Result<(), ()> { println!("Publishing {} = {}", topic, value); let value = value.into_bytes(); - let _ = mqtt_client.publish(topic, QoS::AtLeastOnce, false, value); + let _ = mqtt_client.publish(topic, QoS::AtLeastOnce, false, value).await; } - }); - - // Use the sample client config to set up a client. The sample config has a number of named - // endpoints one of which is marked as the default. - let mut client = Client::new(ClientConfig::load(&PathBuf::from(config_file)).unwrap()); - let endpoint_id: Option<&str> = if !endpoint_id.is_empty() { - Some(&endpoint_id) - } else { - None - }; - let ns = 2; - if let Ok(session) = client.connect_to_endpoint_id(endpoint_id) { - let _ = subscription_loop(session, tx, ns).map_err(|err| { - println!("ERROR: Got an error while performing action - {}", err); - }); } - } + }); + + // Use the sample client config to set up a client. The sample config has a number of named + // endpoints one of which is marked as the default. 
+ let mut client = Client::new(ClientConfig::load(&PathBuf::from(config_file)).unwrap()); + let endpoint_id: Option<&str> = if !endpoint_id.is_empty() { + Some(&endpoint_id) + } else { + None + }; + let ns = 2; + let (session, event_loop) = client.connect_to_endpoint_id(endpoint_id).await.unwrap(); + let handle = event_loop.spawn(); + + session.wait_for_connection().await; + + subscribe_to_events(session, tx, ns).await.map_err(|err| { + println!("ERROR: Got an error while performing action - {}", err); + })?; + + handle.await.unwrap(); Ok(()) } -fn subscription_loop( - session: Arc>, - tx: mpsc::Sender<(NodeId, DataValue)>, +async fn subscribe_to_events( + session: Arc, + tx: mpsc::UnboundedSender<(NodeId, DataValue)>, ns: u16, ) -> Result<(), StatusCode> { // Create a subscription println!("Creating subscription"); - // This scope is important - we don't want to session to be locked when the code hits the - // loop below - { - let session = session.read(); - - // Creates our subscription - one update every second. The update is sent as a message - // to the MQTT thread to be published. - let tx = Arc::new(Mutex::new(tx)); - let subscription_id = session.create_subscription( - 1000f64, + // Creates our subscription - one update every second. The update is sent as a message + // to the MQTT thread to be published. 
+ let tx = Arc::new(Mutex::new(tx)); + let subscription_id = session + .create_subscription( + Duration::from_secs(1), 10, 30, 0, 0, true, - DataChangeCallback::new(move |items| { + DataChangeCallback::new(move |dv, item| { println!("Data change from server:"); let tx = tx.lock(); - items.iter().for_each(|item| { - let node_id = item.item_to_monitor().node_id.clone(); - let value = item.last_value().clone(); - let _ = tx.send((node_id, value)); - }); + let _ = tx.send((item.item_to_monitor().node_id.clone(), dv)); }), - )?; - println!("Created a subscription with id = {}", subscription_id); - - // Create some monitored items - let items_to_create: Vec = ["v1", "v2", "v3", "v4"] - .iter() - .map(|v| NodeId::new(ns, *v).into()) - .collect(); - let _ = session.create_monitored_items( - subscription_id, - TimestampsToReturn::Both, - &items_to_create, - )?; - } - - // Loops forever. The publish thread will call the callback with changes on the variables - let _ = Session::run(session); + ) + .await?; + println!("Created a subscription with id = {}", subscription_id); + + // Create some monitored items + let items_to_create: Vec = ["v1", "v2", "v3", "v4"] + .iter() + .map(|v| NodeId::new(ns, *v).into()) + .collect(); + let _ = session + .create_monitored_items(subscription_id, TimestampsToReturn::Both, items_to_create) + .await?; Ok(()) } diff --git a/samples/simple-client/Cargo.toml b/samples/simple-client/Cargo.toml index f8cefaef4..cccafe87c 100644 --- a/samples/simple-client/Cargo.toml +++ b/samples/simple-client/Cargo.toml @@ -6,6 +6,7 @@ edition = "2021" [dependencies] pico-args = "0.5" +tokio = { version = "1.36.0", features = ["full"] } [dependencies.opcua] path = "../../lib" diff --git a/samples/simple-client/src/main.rs b/samples/simple-client/src/main.rs index ffa48cdbc..639a6effa 100644 --- a/samples/simple-client/src/main.rs +++ b/samples/simple-client/src/main.rs @@ -7,10 +7,16 @@ //! 1. Create a client configuration //! 2. 
Connect to an endpoint specified by the url with security None //! 3. Subscribe to values and loop forever printing out their values -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; -use opcua::client::prelude::*; -use opcua::sync::RwLock; +use opcua::{ + client::{ClientBuilder, DataChangeCallback, IdentityToken, MonitoredItem, Session}, + crypto::SecurityPolicy, + types::{ + DataValue, MessageSecurityMode, MonitoredItemCreateRequest, NodeId, StatusCode, + TimestampsToReturn, UserTokenPolicy, + }, +}; struct Args { help: bool, @@ -41,27 +47,30 @@ Usage: const DEFAULT_URL: &str = "opc.tcp://localhost:4855"; -fn main() -> Result<(), ()> { +#[tokio::main] +async fn main() -> Result<(), ()> { // Read command line arguments let args = Args::parse_args().map_err(|_| Args::usage())?; if args.help { Args::usage(); - } else { - // Optional - enable OPC UA logging - opcua::console_logging::init(); - - // Make the client configuration - let mut client = ClientBuilder::new() - .application_name("Simple Client") - .application_uri("urn:SimpleClient") - .product_uri("urn:SimpleClient") - .trust_server_certs(true) - .create_sample_keypair(true) - .session_retry_limit(3) - .client() - .unwrap(); - - if let Ok(session) = client.connect_to_endpoint( + return Ok(()); + } + // Optional - enable OPC UA logging + opcua::console_logging::init(); + + // Make the client configuration + let mut client = ClientBuilder::new() + .application_name("Simple Client") + .application_uri("urn:SimpleClient") + .product_uri("urn:SimpleClient") + .trust_server_certs(true) + .create_sample_keypair(true) + .session_retry_limit(3) + .client() + .unwrap(); + + let (session, event_loop) = client + .new_session_from_endpoint( ( args.url.as_ref(), SecurityPolicy::None.to_str(), @@ -69,38 +78,41 @@ fn main() -> Result<(), ()> { UserTokenPolicy::anonymous(), ), IdentityToken::Anonymous, - ) { - if let Err(result) = subscribe_to_variables(session.clone(), 2) { - println!( - "ERROR: Got an error while 
subscribing to variables - {}", - result - ); - } else { - // Loops forever. The publish thread will call the callback with changes on the variables - let _ = Session::run(session); - } - } + ) + .await + .unwrap(); + let handle = event_loop.spawn(); + session.wait_for_connection().await; + + if let Err(result) = subscribe_to_variables(session.clone(), 2).await { + println!( + "ERROR: Got an error while subscribing to variables - {}", + result + ); + let _ = session.disconnect().await; } + + handle.await.unwrap(); + Ok(()) } -fn subscribe_to_variables(session: Arc>, ns: u16) -> Result<(), StatusCode> { - let session = session.read(); +async fn subscribe_to_variables(session: Arc, ns: u16) -> Result<(), StatusCode> { // Creates a subscription with a data change callback - let subscription_id = session.create_subscription( - 2000.0, - 10, - 30, - 0, - 0, - true, - DataChangeCallback::new(|changed_monitored_items| { - println!("Data change from server:"); - changed_monitored_items - .iter() - .for_each(|item| print_value(item)); - }), - )?; + let subscription_id = session + .create_subscription( + Duration::from_secs(1), + 10, + 30, + 0, + 0, + true, + DataChangeCallback::new(|dv, item| { + println!("Data change from server:"); + print_value(&dv, item); + }), + ) + .await?; println!("Created a subscription with id = {}", subscription_id); // Create some monitored items @@ -108,18 +120,15 @@ fn subscribe_to_variables(session: Arc>, ns: u16) -> Result<(), .iter() .map(|v| NodeId::new(ns, *v).into()) .collect(); - let _ = session.create_monitored_items( - subscription_id, - TimestampsToReturn::Both, - &items_to_create, - )?; + let _ = session + .create_monitored_items(subscription_id, TimestampsToReturn::Both, items_to_create) + .await?; Ok(()) } -fn print_value(item: &MonitoredItem) { +fn print_value(data_value: &DataValue, item: &MonitoredItem) { let node_id = &item.item_to_monitor().node_id; - let data_value = item.last_value(); if let Some(ref value) = 
data_value.value { println!("Item \"{}\", Value = {:?}", node_id, value); } else { diff --git a/samples/web-client/Cargo.toml b/samples/web-client/Cargo.toml index 23db1d0ac..2e2f37056 100644 --- a/samples/web-client/Cargo.toml +++ b/samples/web-client/Cargo.toml @@ -11,6 +11,7 @@ serde = "1.0" serde_derive = "1.0" serde_json = "1.0" pico-args = "0.5" +futures-util = "0.3.30" tokio = { version = "1", features = ["full"] } [dependencies.opcua] diff --git a/samples/web-client/src/main.rs b/samples/web-client/src/main.rs index 43e1e238a..d33238f62 100644 --- a/samples/web-client/src/main.rs +++ b/samples/web-client/src/main.rs @@ -18,9 +18,21 @@ use actix_web::{ server::HttpServer, ws, App, Error, HttpRequest, HttpResponse, }; - -use opcua::client::prelude::*; -use opcua::sync::RwLock; +use futures_util::StreamExt; +use opcua::{ + client::{ + Client, ClientBuilder, DataChangeCallback, EventCallback, IdentityToken, Session, + SessionPollResult, + }, + crypto::SecurityPolicy, + types::{ + AttributeId, ContentFilter, ContentFilterElement, DataValue, EventFilter, ExtensionObject, + FilterOperator, MessageSecurityMode, MonitoredItemCreateRequest, NodeId, ObjectId, + ObjectTypeId, Operand, QualifiedName, ReferenceTypeId, SimpleAttributeOperand, + TimestampsToReturn, UAString, UserTokenPolicy, Variant, + }, +}; +use tokio::{pin, runtime::Runtime}; struct Args { help: bool, @@ -55,12 +67,17 @@ fn main() -> Result<(), ()> { let args = Args::parse_args().map_err(|_| Args::usage())?; if args.help { Args::usage(); - } else { - // Optional - enable OPC UA logging - opcua::console_logging::init(); - // Run the http server - run_server(format!("127.0.0.1:{}", args.http_port)); + return Ok(()); } + + let runtime = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap(); + // Optional - enable OPC UA logging + opcua::console_logging::init(); + // Run the http server + run_server(format!("127.0.0.1:{}", args.http_port), Arc::new(runtime)); Ok(()) } @@ -79,7 
+96,7 @@ impl Message for DataChangeEvent { enum Event { ConnectionStatusChange(bool), DataChange(Vec), - Event(Vec), + Event(Option>), } const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(5); @@ -92,9 +109,9 @@ struct OPCUASession { /// The OPC UA client client: Client, /// The OPC UA session - session: Option>>, - /// A sender that the session can use to terminate the corresponding OPC UA session - session_tx: Option>, + session: Option>, + /// The tokio runtime + rt: Arc, } impl Actor for OPCUASession { @@ -108,7 +125,8 @@ impl Actor for OPCUASession { fn stopping(&mut self, ctx: &mut Self::Context) -> Running { // Stop the OPC UA session - self.disconnect(ctx); + let rt = self.rt.clone(); + rt.block_on(self.disconnect(ctx)); Running::Stop } } @@ -138,6 +156,7 @@ impl StreamHandler for OPCUASession { fn handle(&mut self, msg: ws::Message, ctx: &mut Self::Context) { // process websocket messages println!("WS: {:?}", msg); + let rt = self.rt.clone(); match msg { ws::Message::Ping(msg) => { self.hb = Instant::now(); @@ -149,17 +168,17 @@ impl StreamHandler for OPCUASession { ws::Message::Text(msg) => { let msg = msg.trim(); if let Some(msg) = msg.strip_prefix("connect ") { - self.connect(ctx, msg); + rt.block_on(self.connect(ctx, msg)); } else if msg.eq("disconnect") { - self.disconnect(ctx); + rt.block_on(self.disconnect(ctx)); } else if let Some(msg) = msg.strip_prefix("subscribe ") { // Node ids are comma separated let node_ids: Vec = msg.split(',').map(|s| s.to_string()).collect(); - self.subscribe(ctx, node_ids); + rt.block_on(self.subscribe(ctx, node_ids)); println!("subscription complete"); } else if let Some(msg) = msg.strip_prefix("add_event ") { let args: Vec = msg.split(',').map(|s| s.to_string()).collect(); - self.add_event(ctx, args); + rt.block_on(self.add_event(ctx, args)); println!("add event complete"); } } @@ -184,44 +203,49 @@ impl OPCUASession { }); } - fn connect(&mut self, ctx: &mut ::Context, opcua_url: &str) { - self.disconnect(ctx); + 
async fn connect(&mut self, ctx: &mut ::Context, opcua_url: &str) { + let _ = self.disconnect(ctx).await; let addr = ctx.address(); - let connected = match self.client.connect_to_endpoint( - ( - opcua_url, - SecurityPolicy::None.to_str(), - MessageSecurityMode::None, - UserTokenPolicy::anonymous(), - ), - IdentityToken::Anonymous, - ) { - Ok(session) => { - { - let mut session = session.write(); - let addr_for_connection_status_change = addr.clone(); - session.set_connection_status_callback(ConnectionStatusCallback::new( - move |connected| { - println!( - "Connection status has changed to {}", - if connected { - "connected" - } else { - "disconnected" - } - ); - addr_for_connection_status_change - .do_send(Event::ConnectionStatusChange(connected)); - }, - )); - session.set_session_closed_callback(SessionClosedCallback::new(|status| { - println!("Session has been closed, status = {}", status); - })); - } - self.session = Some(session); - self.session_tx = Some(Session::run_async(self.session.as_ref().unwrap().clone())); - true + let connected = match self + .client + .new_session_from_endpoint( + ( + opcua_url, + SecurityPolicy::None.to_str(), + MessageSecurityMode::None, + UserTokenPolicy::anonymous(), + ), + IdentityToken::Anonymous, + ) + .await + { + Ok((session, event_loop)) => { + let addr_for_connection_status_change = addr.clone(); + tokio::task::spawn(async move { + let stream = event_loop.enter(); + pin!(stream); + while let Some(msg) = stream.next().await { + match msg { + Ok(SessionPollResult::Reconnected(_)) => { + println!("Session is now connected"); + addr_for_connection_status_change + .do_send(Event::ConnectionStatusChange(true)); + } + Ok(SessionPollResult::ConnectionLost(s)) => { + println!("Lost connection with status: {s}"); + addr_for_connection_status_change + .do_send(Event::ConnectionStatusChange(false)); + } + Err(e) => { + println!("Session has been closed fatally, status = {}", e); + } + _ => {} + } + } + }); + self.session = 
Some(session.clone()); + session.wait_for_connection().await } Err(err) => { println!( @@ -235,15 +259,9 @@ impl OPCUASession { addr.do_send(Event::ConnectionStatusChange(connected)); } - fn disconnect(&mut self, _ctx: &mut ::Context) { - if let Some(ref mut session) = self.session { - let session = session.read(); - if session.is_connected() { - session.disconnect(); - } - } - if let Some(tx) = self.session_tx.take() { - let _ = tx.send(SessionCommand::Stop); + async fn disconnect(&mut self, _ctx: &mut ::Context) { + if let Some(ref session) = self.session { + let _ = session.disconnect().await; } self.session = None; } @@ -280,7 +298,7 @@ impl OPCUASession { } } - fn add_event(&mut self, ctx: &mut ::Context, args: Vec) { + async fn add_event(&mut self, ctx: &mut ::Context, args: Vec) { if args.len() != 3 { return; } @@ -289,8 +307,6 @@ impl OPCUASession { let select_criteria = args.get(2).unwrap(); if let Some(ref mut session) = self.session { - let session = session.read(); - let event_node_id = NodeId::from_str(event_node_id); if event_node_id.is_err() { return; @@ -359,18 +375,23 @@ impl OPCUASession { }; let addr_for_events = ctx.address(); - let event_callback = EventCallback::new(move |events| { + let event_callback = EventCallback::new(move |evt, _item| { // Handle events - if let Some(ref events) = events.events { - addr_for_events.do_send(Event::Event(events.clone())); - } else { - println!("Got an event notification with no events!?"); - } + addr_for_events.do_send(Event::Event(evt.clone())); }); // create a subscription containing events - if let Ok(subscription_id) = - session.create_subscription(500.0, 100, 300, 0, 0, true, event_callback) + if let Ok(subscription_id) = session + .create_subscription( + Duration::from_millis(500), + 100, + 300, + 0, + 0, + true, + event_callback, + ) + .await { // Monitor the item for events let mut item_to_create: MonitoredItemCreateRequest = event_node_id.into(); @@ -379,11 +400,14 @@ impl OPCUASession { 
ObjectId::EventFilter_Encoding_DefaultBinary, &event_filter, ); - if let Ok(result) = session.create_monitored_items( - subscription_id, - TimestampsToReturn::Both, - &vec![item_to_create], - ) { + if let Ok(result) = session + .create_monitored_items( + subscription_id, + TimestampsToReturn::Both, + vec![item_to_create], + ) + .await + { println!("Result of subscribing to event = {:?}", result); } else { println!("Cannot create monitored event!"); @@ -394,35 +418,38 @@ impl OPCUASession { } } - fn subscribe(&mut self, ctx: &mut ::Context, node_ids: Vec) { + async fn subscribe(&mut self, ctx: &mut ::Context, node_ids: Vec) { if let Some(ref mut session) = self.session { // Create a subscription println!("Creating subscription"); - let session = session.read(); // Creates our subscription let addr_for_datachange = ctx.address(); - let data_change_callback = DataChangeCallback::new(move |items| { + let data_change_callback = DataChangeCallback::new(move |data_value, item| { // Changes will be turned into a list of change events that sent to corresponding // web socket to be sent to the client. 
- let changes = items - .iter() - .map(|item| { - let item_to_monitor = item.item_to_monitor(); - DataChangeEvent { - node_id: item_to_monitor.node_id.clone().into(), - attribute_id: item_to_monitor.attribute_id, - value: item.last_value().clone(), - } - }) - .collect::>(); + let item_to_monitor = item.item_to_monitor(); + let change = DataChangeEvent { + node_id: item_to_monitor.node_id.clone().into(), + attribute_id: item_to_monitor.attribute_id, + value: data_value, + }; // Send the changes to the websocket session - addr_for_datachange.do_send(Event::DataChange(changes)); + addr_for_datachange.do_send(Event::DataChange(vec![change])); }); - if let Ok(subscription_id) = - session.create_subscription(500.0, 10, 30, 0, 0, true, data_change_callback) + if let Ok(subscription_id) = session + .create_subscription( + Duration::from_millis(500), + 10, + 30, + 0, + 0, + true, + data_change_callback, + ) + .await { println!("Created a subscription with id = {}", subscription_id); // Create some monitored items @@ -433,11 +460,14 @@ impl OPCUASession { node_id.into() }) .collect(); - if let Ok(_results) = session.create_monitored_items( - subscription_id, - TimestampsToReturn::Both, - &items_to_create, - ) { + if let Ok(_results) = session + .create_monitored_items( + subscription_id, + TimestampsToReturn::Both, + items_to_create, + ) + .await + { println!("Created monitored items"); } else { println!("Cannot create monitored items!"); @@ -466,18 +496,22 @@ fn ws_create_request(r: &HttpRequest) -> Result, +} -fn run_server(address: String) { +fn run_server(address: String, rt: Arc) { HttpServer::new(move || { let base_path = "./html"; - let state = HttpServerState {}; + let state = HttpServerState { + runtime: rt.clone(), + }; App::with_state(state) // Websocket .resource("/ws/", |r| r.method(http::Method::GET).f(ws_create_request))