diff --git a/.idea/workspace.xml b/.idea/workspace.xml
new file mode 100644
index 0000000..b1cc20c
--- /dev/null
+++ b/.idea/workspace.xml
@@ -0,0 +1,24 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/README.md b/README.md
index c3c7de2..24d9175 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@ A USD viewer on the web.
[Open USD Viewer](https://usd-viewer.glitch.me/)
There are two main parts:
-- [USD WASM bindings by Autodesk](https://autodesk-forks.github.io/USD/usd_for_web_demos/)
+- [USD WASM bindings by Autodesk](https://github.com/needle-tools/OpenUSD/tree/needle/feature/wasm-improvements)
- A [Three.js](https://threejs.org/) Hydra Delegate for rendering, originally by Autodesk and improved by hybridherbst
## Info and Known Issues
@@ -19,11 +19,10 @@ There are two main parts:
### Limitations
-- Skinned meshes aren't supported.
- Vertex colors aren't supported.
- Point instancing isn't supported.
- MaterialX isn't supported.
-- LightsAPI isn't supported.
+- LightsAPI isn't supported.
- Texture paths currently can't be resolved correctly for nested USDZ files. One level is fine.
- Fixing this would require adjustments to the WASM bindings.
@@ -87,37 +86,14 @@ NOTE: Origins for these instructions can be found [here](https://github.com/auto
4. Run `wasm-opt -Oz -o "../build_dir/bin/emHdBindings.wasm" "../build_dir/bin/emHdBindings.wasm" --enable-bulk-memory --enable-threads` to shrink the wasm file more.
5. Patch emHdBindings.js to enable the following support; these changes currently can't be made as part of the normal build process:
1. Support for arguments
- - `patch emHdBindings.js < patches/arguments_1.patch`
- - `patch emHdBindings.js < patches/arguments_2.patch`
- - THIS PATCH DOES NOT WORK
- - Copy the following lines:
- ```
- return function (
- moduleArg = {
- // module overrides can be supplied here
- locateFile: (path, prefix) => {
- if (!prefix)
- prefix = _scriptDir.substr(0, _scriptDir.lastIndexOf("/") + 1);
- return prefix + path;
- },
- ...args,
- },
- ) {
- ```
- And replace this line:
- - `return function (moduleArg = {}) {`
+ - `patch emHdBindings.js < arguments_1.patch`
+ - `patch emHdBindings.js -R < arguments_2.patch`
2. Disable ABORT so that one bad file doesn't corrupt the entire session
- - `patch emHdBindings.js < patches/abort.patch`
+ - `patch emHdBindings.js < abort.patch`
3. Add file system functions to the module
- - `patch emHdBindings.js < patches/fileSystem.patch`
- - THIS PATCH DOES NOT WORK
- - Add these lines
- ```
- Module["FS_readdir"] = FS.readdir;
- Module["FS_analyzePath"] = FS.analyzePath;
- ```
- right after
- - `Module["PThread"] = PThread;`
+ - `patch emHdBindings.js -R < fileSystem.patch`
+ 4. Include global export (a usage sketch follows this list)
+ - `echo -e '\nglobalThis["NEEDLE:USD:GET"] = getUsdModule;' >> "emHdBindings.js"`
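+
+For reference, here is a minimal sketch of how the module patched by the steps above might be consumed from the viewer. It assumes the Emscripten factory resolves to the module instance (as MODULARIZE builds typically do); the file paths are placeholders, while `NEEDLE:USD:GET`, `FS_readdir`, and `FS_analyzePath` come from the patch steps:
+
+```js
+// Step 4 exposes the module factory globally.
+const getUsdModule = globalThis["NEEDLE:USD:GET"];
+
+getUsdModule({
+  // Step 1 (arguments patches): module overrides such as locateFile are
+  // forwarded to Emscripten; the prefix here is a placeholder.
+  locateFile: (path) => "/usd-wasm/" + path,
+}).then((usd) => {
+  // Step 3 (fileSystem patch): FS helpers are exposed on the module.
+  if (usd.FS_analyzePath("/scene.usdz").exists) {
+    console.log(usd.FS_readdir("/")); // entries of the virtual FS root
+  }
+});
+```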
##### Debug
1. Install [C/C++ DevTools Support (DWARF)](https://chromewebstore.google.com/detail/cc++-devtools-support-dwa/pdcpmagijalfljmkmjngeonclgbbannb)
@@ -135,36 +111,13 @@ NOTE: Origins for these instructions can be found [here](https://github.com/auto
6. Patch emHdBindings.js to enable the following support; these changes currently can't be made as part of the normal build process:
1. Support for arguments
- `patch emHdBindings.js < arguments_1.patch`
- - `patch emHdBindings.js < arguments_2.patch`
- - THIS PATCH DOES NOT WORK
- - Copy the following lines:
- ```
- return function (
- moduleArg = {
- // module overrides can be supplied here
- locateFile: (path, prefix) => {
- if (!prefix)
- prefix = _scriptDir.substr(0, _scriptDir.lastIndexOf("/") + 1);
- return prefix + path;
- },
- ...args,
- },
- ) {
- ```
- And replace this line:
- - `return function (moduleArg = {}) {`
+ - `patch emHdBindings.js -R < arguments_2.patch`
2. Disable ABORT so that one bad file doesn't corrupt the entire session
- `patch emHdBindings.js < abort.patch`
3. Add file system functions to the module
- - `patch emHdBindings.js < fileSystem.patch`
- - THIS PATCH DOES NOT WORK
- - Add these lines
- ```
- Module["FS_readdir"] = FS.readdir;
- Module["FS_analyzePath"] = FS.analyzePath;
- ```
- right after
- - `Module["PThread"] = PThread;`
+ - `patch emHdBindings.js -R < fileSystem.patch`
+ 4. Include global export
+ - `echo -e '\nglobalThis["NEEDLE:USD:GET"] = getUsdModule;' >> "emHdBindings.js"`
7. Run `npm start`
8. Go to http://localhost:3003 (or wherever the app is running)
9. Open up Chrome Dev Tools
@@ -172,13 +125,9 @@ NOTE: Origins for these instructions can be found [here](https://github.com/auto
11. Under Authored, you can navigate to the pxr files to set breakpoints in the C++ code.
##### Build Script
-There is a build script [here](https://github.com/needle-tools/OpenUSD/blob/needle/feature/wasm-improvements/buildAndMove.sh) which tries to make building easier. Set the mode, build directory and destination directory to deal with the file movement.
-
-Usage: `./buildAndMove.sh --mode release --build-dir ../build-wasm --destination-dir /Users/andrewbeers/git/needle/usd-viewer/public`
-
-NOTE: this does not support patching yet as patching doesn't completely work yet
-NOTE: this does not update CMakeLists.txt for debug mode automatically
+There is a build script [here](https://github.com/needle-tools/OpenUSD/blob/needle/feature/wasm-improvements/buildAndMove.sh) that tries to make building easier. Set the mode, build directory, and destination directory to handle moving the output files. The script does not update CMakeLists.txt for debug mode automatically; follow step 2 above to configure that.
+
+Usage: `./buildAndMove.sh --mode release --build-dir ../build-wasm --destination-dir /Users/andrewbeers/git/needle/usd-viewer/usd-wasm/src/bindings --patch-dir /Users/andrewbeers/git/needle/OpenUSD/pxr/usdImaging/hdEmscripten/patches`
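+
+A debug-mode invocation might look like the following (the paths are placeholders, and `--mode debug` assumes the script accepts the same flags as the release example; configure CMakeLists.txt for debug first, per step 2 above):
+
+`./buildAndMove.sh --mode debug --build-dir ../build-wasm --destination-dir /path/to/usd-viewer/usd-wasm/src/bindings-debug --patch-dir /path/to/OpenUSD/pxr/usdImaging/hdEmscripten/patches`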
## Origin
diff --git a/usd-wasm/src/bindings-debug/emHdBindings.data b/usd-wasm/src/bindings-debug/emHdBindings.data
new file mode 100644
index 0000000..168e611
--- /dev/null
+++ b/usd-wasm/src/bindings-debug/emHdBindings.data
@@ -0,0 +1,19331 @@
+{
+ "Plugins": [
+ {
+ "Info": {
+ "Types": {
+ "ArResolver": {},
+ "ArDefaultResolver": {
+ "bases": [
+ "ArResolver"
+ ],
+ "implementsContexts": true
+ },
+ "ArPackageResolver": {}
+ }
+ },
+ "LibraryPath": "",
+ "Name": "ar",
+ "ResourcePath": "resources",
+ "Root": "..",
+ "Type": "library"
+ }
+ ]
+}
+//
+// Copyright 2023 Pixar
+//
+// Licensed under the Apache License, Version 2.0 (the "Apache License")
+// with the following modification; you may not use this file except in
+// compliance with the Apache License and the following modification to it:
+// Section 6. Trademarks. is deleted and replaced with:
+//
+// 6. Trademarks. This License does not grant permission to use the trade
+// names, trademarks, service marks, or product names of the Licensor
+// and its affiliates, except as required to comply with Section 4(c) of
+// the License and to reproduce the content of the NOTICE file.
+//
+// You may obtain a copy of the Apache License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the Apache License with the above modification is
+// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the Apache License for the specific
+// language governing permissions and limitations under the Apache License.
+//
+////////////////////////////////////////////////////////////////////////
+
+/* ************************************************************************** */
+/* ** ** */
+/* ** This file is generated by a script. ** */
+/* ** ** */
+/* ** Do not edit it directly (unless it is within a CUSTOM CODE section)! ** */
+/* ** Edit hdSchemaDefs.py instead to make changes. ** */
+/* ** ** */
+/* ************************************************************************** */
+
+#include "{{ INCLUDE_PATH }}/{{ FILE_NAME }}.h"
+
+#include "pxr/imaging/hd/retainedDataSource.h"
+
+{%- if IMPL_SCHEMA_INCLUDES is defined -%}
+{%- for t in IMPL_SCHEMA_INCLUDES %}
+#include "{{ t | expand }}.h"
+{%- endfor -%}
+{%- endif %}
+
+#include "pxr/base/trace/trace.h"
+
+// --(BEGIN CUSTOM CODE: Includes)--
+{%- if 'Includes' in CUSTOM_CODE_IMPL %}
+{{ CUSTOM_CODE_IMPL['Includes'] }}
+{%- endif %}
+// --(END CUSTOM CODE: Includes)--
+
+PXR_NAMESPACE_OPEN_SCOPE
+
+TF_DEFINE_PUBLIC_TOKENS({{SCHEMA_CLASS_NAME}}Tokens,
+ {{ SCHEMA_CLASS_NAME|snake }}_TOKENS);
+
+// --(BEGIN CUSTOM CODE: Schema Methods)--
+{%- if 'Schema Methods' in CUSTOM_CODE_IMPL %}
+{{ CUSTOM_CODE_IMPL['Schema Methods'] }}
+{%- endif %}
+// --(END CUSTOM CODE: Schema Methods)--
+
+{%- if GENERIC_MEMBER is defined -%}
+{%- set name, type_name, opt_dict = GENERIC_MEMBER -%}
+{%- if opt_dict.get('GETTER', True) %}
+
+TfTokenVector
+{{ SCHEMA_CLASS_NAME }}::Get{{ name | capitalizeFirst }}Names()
+{%- if VERSION_GUARD_CONST_GETTER %}
+#if HD_API_VERSION >= 66
+ const
+#else
+
+#endif
+{% else %} const
+{% endif -%} {#- if VERSION_GUARD_CONST_GETTER -#}
+{
+ if (_container) {
+ return _container->GetNames();
+ } else {
+ return {};
+ }
+}
+
+{{ type_name}}{% if not type_name.endswith('Schema') %}Handle{% endif %}
+{{ SCHEMA_CLASS_NAME }}::Get{{ name | capitalizeFirst }}(const TfToken &name)
+{%- if VERSION_GUARD_CONST_GETTER %}
+#if HD_API_VERSION >= 66
+ const
+#else
+
+#endif
+{% else %} const
+{% endif -%} {#- if VERSION_GUARD_CONST_GETTER -#}
+{
+ {%- if type_name.endswith('Schema') %}
+ return {{type_name}}(
+ _GetTypedDataSource<{{ type_name | underlyingDataSource}}>(name));
+ {%- else %}
+ return _GetTypedDataSource<{{ type_name | underlyingDataSource}}>(name);
+ {%- endif %}
+}
+{%- endif -%} {# if opt_dict.get('GETTER', True) #}
+{%- endif -%} {# if GENERIC_MEMBER is defined #}
+
+{%- if MEMBERS is defined -%}
+{%- for name, type_name, opt_dict in MEMBERS -%}
+{%- if opt_dict.get('GETTER', True) %}
+
+{{ type_name}}{% if not type_name.endswith('Schema') %}Handle{% endif %}
+{{ SCHEMA_CLASS_NAME }}::Get{{ name|capitalizeFirst }}()
+{%- if VERSION_GUARD_CONST_GETTER %}
+#if HD_API_VERSION >= 66
+ const
+#else
+
+#endif
+{% else %} const
+{% endif -%} {#- if VERSION_GUARD_CONST_GETTER -#}
+{
+ return {% if type_name.endswith('Schema') %}{{ type_name }}({% endif %}_GetTypedDataSource<{{type_name | underlyingDataSource}}>(
+ {{SCHEMA_CLASS_NAME}}Tokens->{{ name }}){% if type_name.endswith('Schema') %}){% endif %};
+}
+{%- endif -%} {# if opt_dict.get('GETTER', True) #}
+{%- endfor -%}
+{%- endif -%} {# if MEMBERS is defined #}
+
+{%- if GENERIC_MEMBER is defined %}
+
+/*static*/
+HdContainerDataSourceHandle
+{{SCHEMA_CLASS_NAME}}::BuildRetained(
+ const size_t count,
+ const TfToken * const names,
+ const HdDataSourceBaseHandle * const values)
+{
+ return HdRetainedContainerDataSource::New(count, names, values);
+}
+{%- else -%}
+{%- if MEMBERS %}
+
+/*static*/
+HdContainerDataSourceHandle
+{{ SCHEMA_CLASS_NAME }}::BuildRetained(
+{%- for name, type_name, opt_dict in MEMBERS %}
+ const {{type_name | underlyingDataSource}}Handle &{{ name }}{%if loop.last == False %},{% endif -%}
+{%- endfor %}
+)
+{
+ TfToken _names[{{MEMBERS|length}}];
+ HdDataSourceBaseHandle _values[{{MEMBERS|length}}];
+
+ size_t _count = 0;
+{%- for name, type_name, opt_dict in MEMBERS %}
+
+ if ({{name}}) {
+ _names[_count] = {{SCHEMA_CLASS_NAME}}Tokens->{{name}};
+ _values[_count++] = {{name}};
+ }
+{%- endfor %}
+ return HdRetainedContainerDataSource::New(_count, _names, _values);
+}
+
+{%- for name, type_name, opt_dict in MEMBERS %}
+
+{{ SCHEMA_CLASS_NAME }}::Builder &
+{{ SCHEMA_CLASS_NAME }}::Builder::Set{{ name|capitalizeFirst }}(
+ const {{ type_name | underlyingDataSource}}Handle &{{name}})
+{
+ _{{name}} = {{name}};
+ return *this;
+}
+{%- endfor %}
+
+HdContainerDataSourceHandle
+{{ SCHEMA_CLASS_NAME }}::Builder::Build()
+{
+ return {{ SCHEMA_CLASS_NAME }}::BuildRetained(
+{%- for name, type_name, opt_dict in MEMBERS %}
+ _{{ name }}{%if loop.last == False %},{% endif -%}
+{%- endfor %}
+ );
+}
+{%- endif -%} {# else of if MEMBERS #}
+{%- endif -%} {# else of if GENERIC_MEMBER is defined #}
+
+{%- if SCHEMA_TOKEN is defined %}
+
+/*static*/
+{{ SCHEMA_CLASS_NAME }}
+{{ SCHEMA_CLASS_NAME }}::GetFromParent(
+ const HdContainerDataSourceHandle &fromParentContainer)
+{
+ return {{ SCHEMA_CLASS_NAME }}(
+ fromParentContainer
+ ? HdContainerDataSource::Cast(fromParentContainer->Get(
+ {{SCHEMA_CLASS_NAME}}Tokens->{{SCHEMA_TOKEN}}))
+ : nullptr);
+}
+
+/*static*/
+const TfToken &
+{{ SCHEMA_CLASS_NAME }}::GetSchemaToken()
+{
+ return {{SCHEMA_CLASS_NAME}}Tokens->{{SCHEMA_TOKEN}};
+}
+{%- endif -%}
+
+{%- if ADD_DEFAULT_LOCATOR is defined %}
+
+/*static*/
+const HdDataSourceLocator &
+{{ SCHEMA_CLASS_NAME }}::GetDefaultLocator()
+{
+{%- if LOCATOR_PREFIX is defined %}
+ static const HdDataSourceLocator locator =
+ {{ LOCATOR_PREFIX }}.Append(GetSchemaToken());
+{%- else %}
+ static const HdDataSourceLocator locator(GetSchemaToken());
+{%- endif %}
+ return locator;
+}
+{%- endif -%}
+
+{%- if MEMBERS is defined -%}
+{%- for name, type_name, opt_dict in MEMBERS -%}
+{%- if opt_dict['ADD_LOCATOR'] %}
+
+/* static */
+const HdDataSourceLocator &
+{{ SCHEMA_CLASS_NAME }}::Get{{ name |capitalizeFirst }}Locator()
+{
+ static const HdDataSourceLocator locator =
+ GetDefaultLocator().Append(
+ {{SCHEMA_CLASS_NAME}}Tokens->{{name}});
+ return locator;
+}
+{%- endif -%} {# if opt_dict['ADD_LOCATOR'] #}
+{%- endfor -%}
+{%- endif -%} {# if MEMBERS is defined #}
+
+{%- if STATIC_LOCATOR_ACCESSORS is defined -%}
+{%- for name, tokens in STATIC_LOCATOR_ACCESSORS %}
+
+/*static*/
+const HdDataSourceLocator &
+{{ SCHEMA_CLASS_NAME }}::Get{{name|capitalizeFirst}}Locator()
+{
+ static const HdDataSourceLocator locator(
+ {%- for t in tokens -%}
+ {%if t is string %}{{SCHEMA_CLASS_NAME}}Tokens->{{ t }}{% else %}{{ t[0] }}SchemaTokens->{{ t[1] }}{% endif %}{%if loop.last == False %},{% endif -%}
+ {%- endfor %}
+ );
+ return locator;
+}
+{%- endfor -%}
+{%- endif -%} {# if STATIC_LOCATOR_ACCESSORS is defined #}
+
+{%- if STATIC_TOKEN_DATASOURCE_BUILDERS is defined -%}
+{%- for typeName, tokens in STATIC_TOKEN_DATASOURCE_BUILDERS %}
+
+/*static*/
+HdTokenDataSourceHandle
+{{ SCHEMA_CLASS_NAME }}::Build{{typeName|capitalizeFirst}}DataSource(
+ const TfToken &{{typeName}})
+{
+{% for token in tokens %}
+ if ({{typeName}} == {{SCHEMA_CLASS_NAME}}Tokens->{{ token | tokenName }}) {
+        static const HdRetainedTypedSampledDataSource<TfToken>::Handle ds =
+            HdRetainedTypedSampledDataSource<TfToken>::New({{typeName}});
+ return ds;
+ }
+{%- endfor %}
+ // fallback for unknown token
+    return HdRetainedTypedSampledDataSource<TfToken>::New({{typeName}});
+}
+{%- endfor -%}
+{%- endif %} {# if STATIC_TOKEN_DATASOURCE_BUILDERS is defined #}
+
+PXR_NAMESPACE_CLOSE_SCOPE
+//
+// Copyright 2023 Pixar
+//
+// Licensed under the Apache License, Version 2.0 (the "Apache License")
+// with the following modification; you may not use this file except in
+// compliance with the Apache License and the following modification to it:
+// Section 6. Trademarks. is deleted and replaced with:
+//
+// 6. Trademarks. This License does not grant permission to use the trade
+// names, trademarks, service marks, or product names of the Licensor
+// and its affiliates, except as required to comply with Section 4(c) of
+// the License and to reproduce the content of the NOTICE file.
+//
+// You may obtain a copy of the Apache License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the Apache License with the above modification is
+// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the Apache License for the specific
+// language governing permissions and limitations under the Apache License.
+//
+////////////////////////////////////////////////////////////////////////
+
+/* ************************************************************************** */
+/* ** ** */
+/* ** This file is generated by a script. ** */
+/* ** ** */
+/* ** Do not edit it directly (unless it is within a CUSTOM CODE section)! ** */
+/* ** Edit hdSchemaDefs.py instead to make changes. ** */
+/* ** ** */
+/* ************************************************************************** */
+
+#ifndef {{ HEADER_GUARD|snake }}_{{ FILE_NAME|snake }}_H
+#define {{ HEADER_GUARD|snake }}_{{ FILE_NAME|snake }}_H
+
+/// \file
+
+#include "{{ INCLUDE_PATH }}/api.h"
+
+{%- if SCHEMA_INCLUDES is defined -%}
+{%- for t in SCHEMA_INCLUDES %}
+#include "{{ t | expand }}.h"
+{%- endfor -%}
+{%- endif %}
+
+#include "pxr/imaging/hd/schema.h"
+{%- if VERSION_GUARD_CONST_GETTER %}
+#include "pxr/imaging/hd/version.h"
+{%- endif %}
+
+// --(BEGIN CUSTOM CODE: Includes)--
+{%- if 'Includes' in CUSTOM_CODE_HEADER %}
+{{ CUSTOM_CODE_HEADER['Includes'] }}
+{%- endif %}
+// --(END CUSTOM CODE: Includes)--
+
+PXR_NAMESPACE_OPEN_SCOPE
+
+// --(BEGIN CUSTOM CODE: Declares)--
+{%- if 'Declares' in CUSTOM_CODE_HEADER %}
+{{ CUSTOM_CODE_HEADER['Declares'] }}
+{%- endif %}
+// --(END CUSTOM CODE: Declares)--
+
+#define {{ SCHEMA_CLASS_NAME|snake }}_TOKENS \
+
+{%- if SCHEMA_TOKEN is defined %}
+ ({{SCHEMA_TOKEN}}) \
+{%- endif -%}
+
+{%- if MEMBERS is defined -%}
+{%- for name, type_name, opt_dict in MEMBERS -%}
+{%- if SCHEMA_TOKEN is not defined or name != SCHEMA_TOKEN %}
+ ({{name}}) \
+{%- endif -%}
+{%- endfor -%}
+{%- endif -%}
+
+{%- if EXTRA_TOKENS is defined -%}
+{%- for t in EXTRA_TOKENS %}
+ ({{t}}) \
+{%- endfor -%}
+{%- endif -%}
+
+{%- if STATIC_TOKEN_DATASOURCE_BUILDERS is defined -%}
+{%- for typeName, tokens in STATIC_TOKEN_DATASOURCE_BUILDERS -%}
+{%- for token in tokens %}
+ ({{token}}) \
+{%- endfor -%}
+{%- endfor -%}
+{%- endif %}
+
+TF_DECLARE_PUBLIC_TOKENS({{SCHEMA_CLASS_NAME}}Tokens, {{ LIBRARY_API }},
+ {{ SCHEMA_CLASS_NAME|snake }}_TOKENS);
+
+//-----------------------------------------------------------------------------
+
+{% if DOC is defined -%}
+{{ DOC | expand | makeComment }}
+{%- endif %}
+class {{ SCHEMA_CLASS_NAME }} : public HdSchema
+{
+public:
+ /// \name Schema retrieval
+ /// @{
+
+ {{ SCHEMA_CLASS_NAME }}(HdContainerDataSourceHandle container)
+ : HdSchema(container) {}
+
+{%- if SCHEMA_TOKEN is defined %}
+
+ /// Retrieves a container data source with the schema's default name token
+ /// "{{ SCHEMA_TOKEN }}" from the parent container and constructs a
+ /// {{SCHEMA_CLASS_NAME}} instance.
+ /// Because the requested container data source may not exist, the result
+ /// should be checked with IsDefined() or a bool comparison before use.
+ {{ LIBRARY_API }}
+ static {{ SCHEMA_CLASS_NAME }} GetFromParent(
+ const HdContainerDataSourceHandle &fromParentContainer);
+{%- endif %}
+
+ /// @}
+
+// --(BEGIN CUSTOM CODE: Schema Methods)--
+{%- if 'Schema Methods' in CUSTOM_CODE_HEADER %}
+{{ CUSTOM_CODE_HEADER['Schema Methods'] }}
+{%- endif %}
+// --(END CUSTOM CODE: Schema Methods)--
+
+ /// \name Member accessor
+ /// @{
+{%- if GENERIC_MEMBER is defined -%}
+{%- set name, type_name, opt_dict = GENERIC_MEMBER -%}
+{%- if opt_dict.get('GETTER', True) %}
+
+ {{ LIBRARY_API }}
+ TfTokenVector Get{{ name | capitalizeFirst }}Names()
+{%- if VERSION_GUARD_CONST_GETTER %}
+#if HD_API_VERSION >= 66
+ const;
+#else
+ ;
+#endif
+{% else %} const;
+{% endif -%} {# if VERSION_GUARD_CONST_GETTER #}
+
+{%- if 'DOC' in opt_dict -%}
+{%- for l in (opt_dict['DOC'].split()|join(' ')|wordwrap(width=70)).split('\n') %}
+ /// {{l}}
+{%- endfor -%}
+{%- endif %}
+ {{ LIBRARY_API }}
+ {{ type_name}}{% if not type_name.endswith('Schema') %}Handle{% endif %} Get{{ name|capitalizeFirst }}(const TfToken &name)
+{%- if VERSION_GUARD_CONST_GETTER %}
+#if HD_API_VERSION >= 66
+ const;
+#else
+ ;
+#endif
+{% else %} const;
+{%- endif -%} {# if VERSION_GUARD_CONST_GETTER #}
+{%- endif -%} {# if opt_dict.get('GETTER', True) #}
+{%- endif -%} {# if GENERIC_MEMBER is defined #}
+
+{%- if MEMBERS is defined -%}
+{%- for name, type_name, opt_dict in MEMBERS -%}
+{%- if opt_dict.get('GETTER', True) %}
+{% if 'DOC' in opt_dict -%}
+{%- for l in (opt_dict['DOC'].split()|join(' ')|wordwrap(width=70)).split('\n') %}
+ /// {{l}}
+{%- endfor -%}
+{%- endif %}
+ {{ LIBRARY_API }}
+ {{ type_name}}{% if not type_name.endswith('Schema') %}Handle{% endif %} Get{{ name|capitalizeFirst }}()
+{%- if VERSION_GUARD_CONST_GETTER %}
+#if HD_API_VERSION >= 66
+ const;
+#else
+ ;
+#endif
+{% else %} const;
+{%- endif -%} {# if VERSION_GUARD_CONST_GETTER #}
+{%- endif -%} {# if opt_dict.get('GETTER', True) #}
+{%- endfor -%}
+{%- endif %} {# if MEMBERS is defined #}
+
+ /// @}
+
+{%- if SCHEMA_TOKEN is defined %}
+
+ /// \name Schema location
+ /// @{
+
+ /// Returns a token where the container representing this schema is found in
+ /// a container by default.
+ {{ LIBRARY_API }}
+ static const TfToken &GetSchemaToken();
+
+{%- if ADD_DEFAULT_LOCATOR is defined %}
+
+ /// Returns an HdDataSourceLocator (relative to the prim-level data source)
+ /// where the container representing this schema is found by default.
+ {{ LIBRARY_API }}
+ static const HdDataSourceLocator &GetDefaultLocator();
+{%- endif %}
+
+ /// @}
+{%- endif -%}
+
+
+{%- if self.member_locators() | trim %}
+
+ /// \name Data source locators for members
+ ///
+ /// The following methods return an HdDataSourceLocator (relative to the
+ /// prim-level data source) where the data source for a member can be found.
+ ///
+ /// This is often useful for checking intersection against the
+ /// HdDataSourceLocatorSet sent with HdDataSourceObserver::PrimsDirtied.
+ /// @{
+
+{%- block member_locators -%}
+{%- if MEMBERS is defined -%}
+{%- for name, type_name, opt_dict in MEMBERS -%}
+{%- if opt_dict['ADD_LOCATOR'] %}
+
+ /// Prim-level relative data source locator to locate {{ name }}.
+ {{ LIBRARY_API }}
+ static const HdDataSourceLocator &Get{{ name |capitalizeFirst }}Locator();
+{%- endif -%}
+{%- endfor -%}
+{%- endif -%} {# if MEMBERS is defined #}
+
+{%- if STATIC_LOCATOR_ACCESSORS is defined -%}
+{%- for name, tokens in STATIC_LOCATOR_ACCESSORS %}
+
+ /// Additional prim-level relative data source locator to locate
+ /// {{ name }}.
+ {{ LIBRARY_API }}
+ static const HdDataSourceLocator &Get{{name | capitalizeFirst}}Locator();
+{%- endfor -%}
+{%- endif -%}
+{%- endblock member_locators %}
+ /// @}
+{%- endif %} {# if self.member_locators() | trim #}
+
+ /// \name Schema construction
+ /// @{
+
+{%- if GENERIC_MEMBER is defined %}
+ {{ LIBRARY_API }}
+ static HdContainerDataSourceHandle
+ BuildRetained(
+ size_t count,
+ const TfToken *names,
+ const HdDataSourceBaseHandle *values);
+{%- else -%}
+{%- if MEMBERS %}
+
+ /// \deprecated Use Builder instead.
+ ///
+ /// Builds a container data source which includes the provided child data
+ /// sources. Parameters with nullptr values are excluded. This is a
+ /// low-level interface. For cases in which it's desired to define
+ /// the container with a sparse set of child fields, the Builder class
+ /// is often more convenient and readable.
+ {{ LIBRARY_API }}
+ static HdContainerDataSourceHandle
+ BuildRetained(
+{%- for name, type_name, opt_dict in MEMBERS %}
+ const {{ type_name | underlyingDataSource }}Handle &{{ name }}{%if loop.last == False %},{% endif -%}
+{%- endfor %}
+ );
+
+ /// \class {{ SCHEMA_CLASS_NAME }}::Builder
+ ///
+ /// Utility class for setting sparse sets of child data source fields to be
+ /// filled as arguments into BuildRetained. Because all setter methods
+ /// return a reference to the instance, this can be used in the "builder
+ /// pattern" form.
+ class Builder
+ {
+ public:
+{%- for name, type_name, opt_dict in MEMBERS %}
+ {{ LIBRARY_API }}
+ Builder &Set{{ name|capitalizeFirst }}(
+ const {{ type_name | underlyingDataSource}}Handle &{{name}});
+{%- endfor %}
+
+ /// Returns a container data source containing the members set thus far.
+ {{ LIBRARY_API }}
+ HdContainerDataSourceHandle Build();
+
+ private:
+{%- for name, type_name, opt_dict in MEMBERS %}
+ {{ type_name | underlyingDataSource}}Handle _{{name}};
+{%- endfor %}
+
+ };
+{%- endif -%} {# else of if MEMBERS #}
+{%- endif -%} {# else of if GENERIC_MEMBER is defined #}
+
+{%- if STATIC_TOKEN_DATASOURCE_BUILDERS is defined -%}
+{%- for typeName, tokens in STATIC_TOKEN_DATASOURCE_BUILDERS %}
+
+ /// Returns token data source for use as {{typeName}} value.
+ ///
+ /// The following values will be stored statically and reused for future
+ /// calls:
+{%- for token in tokens %}
+ /// - {{SCHEMA_CLASS_NAME}}Tokens->{{ token | tokenName }}
+{%- endfor %}
+ {{ LIBRARY_API }}
+ static HdTokenDataSourceHandle Build{{typeName|capitalizeFirst}}DataSource(
+ const TfToken &{{typeName}});
+{%- endfor -%}
+{%- endif %}
+
+ /// @}
+};
+
+PXR_NAMESPACE_CLOSE_SCOPE
+
+#endif
+{
+ "Plugins": [
+ {
+ "Info": {},
+ "LibraryPath": "",
+ "Name": "hd",
+ "ResourcePath": "resources",
+ "Root": "..",
+ "Type": "library"
+ }
+ ]
+}
+{
+ "Plugins": [
+ {
+ "Info": {
+ "Types": {
+ "HttpResolver": {
+ "bases": ["ArDefaultResolver"]
+ }
+ }
+ },
+ "Name": "HttpResolver",
+ "LibraryPath": "",
+ "ResourcePath": "resources",
+ "Root": "..",
+ "Type": "library"
+ }
+ ]
+}
+{
+ "Plugins": [
+ {
+ "Info": {
+ "Types": {
+ "NdrDiscoveryPlugin": {},
+ "_NdrFilesystemDiscoveryPlugin" : {
+ "bases": ["NdrDiscoveryPlugin"],
+ "displayName": "Filesystem Discovery"
+ },
+ "NdrParserPlugin": {}
+ }
+ },
+ "LibraryPath": "",
+ "Name": "ndr",
+ "ResourcePath": "resources",
+ "Root": "..",
+ "Type": "library"
+ }
+ ]
+}
+{
+ "Includes": [
+ "*/resources/"
+ ]
+}
+{
+ "Plugins": [
+ {
+ "Info": {
+ "SdfMetadata": {
+ "payloadAssetDependencies": {
+ "appliesTo": "prims",
+ "displayGroup": "Pipeline",
+ "type": "asset[]"
+ }
+ },
+ "Types": {
+ "SdfFileFormat": {
+ "displayName": "Sdf file format base class",
+ "target": "sdf"
+ },
+ "SdfTextFileFormat": {
+ "bases": [
+ "SdfFileFormat"
+ ],
+ "displayName": "Sdf Text File Format",
+ "extensions": [
+ "sdf"
+ ],
+ "formatId": "sdf"
+ }
+ }
+ },
+ "LibraryPath": "",
+ "Name": "sdf",
+ "ResourcePath": "resources",
+ "Root": "..",
+ "Type": "library"
+ }
+ ]
+}
+//
+// Copyright 2017 Pixar
+//
+// Licensed under the Apache License, Version 2.0 (the "Apache License")
+// with the following modification; you may not use this file except in
+// compliance with the Apache License and the following modification to it:
+// Section 6. Trademarks. is deleted and replaced with:
+//
+// 6. Trademarks. This License does not grant permission to use the trade
+// names, trademarks, service marks, or product names of the Licensor
+// and its affiliates, except as required to comply with Section 4(c) of
+// the License and to reproduce the content of the NOTICE file.
+//
+// You may obtain a copy of the Apache License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the Apache License with the above modification is
+// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the Apache License for the specific
+// language governing permissions and limitations under the Apache License.
+//
+#ifndef {{ Upper(libraryName) }}_API_H
+#define {{ Upper(libraryName) }}_API_H
+
+#include "pxr/base/arch/export.h"
+
+#if defined(PXR_STATIC)
+# define {{ Upper(libraryName) }}_API
+# define {{ Upper(libraryName) }}_API_TEMPLATE_CLASS(...)
+# define {{ Upper(libraryName) }}_API_TEMPLATE_STRUCT(...)
+# define {{ Upper(libraryName) }}_LOCAL
+#else
+# if defined({{ Upper(libraryName) }}_EXPORTS)
+# define {{ Upper(libraryName) }}_API ARCH_EXPORT
+# define {{ Upper(libraryName) }}_API_TEMPLATE_CLASS(...) ARCH_EXPORT_TEMPLATE(class, __VA_ARGS__)
+# define {{ Upper(libraryName) }}_API_TEMPLATE_STRUCT(...) ARCH_EXPORT_TEMPLATE(struct, __VA_ARGS__)
+# else
+# define {{ Upper(libraryName) }}_API ARCH_IMPORT
+# define {{ Upper(libraryName) }}_API_TEMPLATE_CLASS(...) ARCH_IMPORT_TEMPLATE(class, __VA_ARGS__)
+# define {{ Upper(libraryName) }}_API_TEMPLATE_STRUCT(...) ARCH_IMPORT_TEMPLATE(struct, __VA_ARGS__)
+# endif
+# define {{ Upper(libraryName) }}_LOCAL ARCH_HIDDEN
+#endif
+
+#endif
+{
+ "Plugins": [
+ {
+ "Info": {
+ "Types": {
+ }
+ },
+ "LibraryPath": "@PLUG_INFO_LIBRARY_PATH@",
+ "Name": "{{ libraryName }}",
+ "ResourcePath": "@PLUG_INFO_RESOURCE_PATH@",
+ "Root": "@PLUG_INFO_ROOT@",
+ "Type": "library"
+ }
+ ]
+}
+//
+// Copyright 2016 Pixar
+//
+// Licensed under the Apache License, Version 2.0 (the "Apache License")
+// with the following modification; you may not use this file except in
+// compliance with the Apache License and the following modification to it:
+// Section 6. Trademarks. is deleted and replaced with:
+//
+// 6. Trademarks. This License does not grant permission to use the trade
+// names, trademarks, service marks, or product names of the Licensor
+// and its affiliates, except as required to comply with Section 4(c) of
+// the License and to reproduce the content of the NOTICE file.
+//
+// You may obtain a copy of the Apache License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the Apache License with the above modification is
+// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the Apache License for the specific
+// language governing permissions and limitations under the Apache License.
+//
+#include "{{ libraryPath }}/{{ cls.GetHeaderFile() }}"
+#include "pxr/usd/usd/schemaRegistry.h"
+#include "pxr/usd/usd/typed.h"
+
+#include "pxr/usd/sdf/types.h"
+#include "pxr/usd/sdf/assetPath.h"
+
+{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
+#include "pxr/base/tf/staticTokens.h"
+
+{% endif %}
+{% if useExportAPI %}
+{{ namespaceOpen }}
+
+{% endif %}
+// Register the schema with the TfType system.
+TF_REGISTRY_FUNCTION(TfType)
+{
+ TfType::Define<{{ cls.cppClassName }},
+ TfType::Bases< {{ cls.parentCppClassName }} > >();
+
+{% if cls.isConcrete %}
+ // Register the usd prim typename as an alias under UsdSchemaBase. This
+ // enables one to call
+    // TfType::Find<UsdSchemaBase>().FindDerivedByName("{{ cls.usdPrimTypeName }}")
+ // to find TfType<{{ cls.cppClassName }}>, which is how IsA queries are
+ // answered.
+    TfType::AddAlias<UsdSchemaBase, {{ cls.cppClassName }}>("{{ cls.usdPrimTypeName }}");
+{% endif %}
+}
+
+{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
+TF_DEFINE_PRIVATE_TOKENS(
+ _schemaTokens,
+ ({{ cls.propertyNamespacePrefix }})
+);
+
+{% endif %}
+/* virtual */
+{{ cls.cppClassName }}::~{{ cls.cppClassName }}()
+{
+}
+
+{% if not cls.isAPISchemaBase %}
+/* static */
+{{ cls.cppClassName }}
+{{ cls.cppClassName }}::Get(const UsdStagePtr &stage, const SdfPath &path)
+{
+ if (!stage) {
+ TF_CODING_ERROR("Invalid stage");
+ return {{ cls.cppClassName }}();
+ }
+{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
+ TfToken name;
+ if (!Is{{ cls.usdPrimTypeName }}Path(path, &name)) {
+ TF_CODING_ERROR("Invalid {{ cls.propertyNamespacePrefix }} path <%s>.", path.GetText());
+ return {{ cls.cppClassName }}();
+ }
+ return {{ cls.cppClassName }}(stage->GetPrimAtPath(path.GetPrimPath()), name);
+{% else %}
+ return {{ cls.cppClassName }}(stage->GetPrimAtPath(path));
+{% endif %}
+}
+
+{% if cls.isMultipleApply %}
+{{ cls.cppClassName }}
+{{ cls.cppClassName }}::Get(const UsdPrim &prim, const TfToken &name)
+{
+ return {{ cls.cppClassName }}(prim, name);
+}
+
+/* static */
+std::vector<{{ cls.cppClassName }}>
+{{ cls.cppClassName }}::GetAll(const UsdPrim &prim)
+{
+ std::vector<{{ cls.cppClassName }}> schemas;
+
+ for (const auto &schemaName :
+ UsdAPISchemaBase::_GetMultipleApplyInstanceNames(prim, _GetStaticTfType())) {
+ schemas.emplace_back(prim, schemaName);
+ }
+
+ return schemas;
+}
+
+{% endif %}
+{% endif %}
+{% if cls.isConcrete %}
+/* static */
+{{ cls.cppClassName }}
+{{ cls.cppClassName }}::Define(
+ const UsdStagePtr &stage, const SdfPath &path)
+{
+ static TfToken usdPrimTypeName("{{ cls.usdPrimTypeName }}");
+ if (!stage) {
+ TF_CODING_ERROR("Invalid stage");
+ return {{ cls.cppClassName }}();
+ }
+ return {{ cls.cppClassName }}(
+ stage->DefinePrim(path, usdPrimTypeName));
+}
+{% endif %}
+{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
+
+/* static */
+bool
+{{ cls.cppClassName }}::IsSchemaPropertyBaseName(const TfToken &baseName)
+{
+ static TfTokenVector attrsAndRels = {
+{% for attrName in cls.attrOrder %}
+{% set attr = cls.attrs[attrName] %}
+ UsdSchemaRegistry::GetMultipleApplyNameTemplateBaseName(
+ {{ tokensPrefix }}Tokens->{{ attr.name }}),
+{% endfor %}
+{% for relName in cls.relOrder %}
+{% set rel = cls.rels[relName] %}
+ UsdSchemaRegistry::GetMultipleApplyNameTemplateBaseName(
+ {{ tokensPrefix }}Tokens->{{ rel.name }}),
+{% endfor %}
+ };
+
+ return find(attrsAndRels.begin(), attrsAndRels.end(), baseName)
+ != attrsAndRels.end();
+}
+
+/* static */
+bool
+{{ cls.cppClassName }}::Is{{ cls.usdPrimTypeName }}Path(
+ const SdfPath &path, TfToken *name)
+{
+ if (!path.IsPropertyPath()) {
+ return false;
+ }
+
+ std::string propertyName = path.GetName();
+ TfTokenVector tokens = SdfPath::TokenizeIdentifierAsTokens(propertyName);
+
+ // The baseName of the {{ cls.usdPrimTypename }} path can't be one of the
+ // schema properties. We should validate this in the creation (or apply)
+ // API.
+ TfToken baseName = *tokens.rbegin();
+ if (IsSchemaPropertyBaseName(baseName)) {
+ return false;
+ }
+
+ if (tokens.size() >= 2
+ && tokens[0] == _schemaTokens->{{ cls.propertyNamespacePrefix }}) {
+ *name = TfToken(propertyName.substr(
+ _schemaTokens->{{ cls.propertyNamespacePrefix }}.GetString().size() + 1));
+ return true;
+ }
+
+ return false;
+}
+{% endif %}
+
+/* virtual */
+UsdSchemaKind {{ cls.cppClassName }}::_GetSchemaKind() const
+{
+ return {{ cls.cppClassName }}::schemaKind;
+}
+{% if cls.isAppliedAPISchema %}
+
+/* static */
+bool
+{% if not cls.isMultipleApply %}
+{{ cls.cppClassName }}::CanApply(
+ const UsdPrim &prim, std::string *whyNot)
+{% else %}
+{{ cls.cppClassName }}::CanApply(
+ const UsdPrim &prim, const TfToken &name, std::string *whyNot)
+{% endif %}
+{
+{% if cls.isMultipleApply %}
+ return prim.CanApplyAPI<{{ cls.cppClassName }}>(name, whyNot);
+{% else %}
+ return prim.CanApplyAPI<{{ cls.cppClassName }}>(whyNot);
+{% endif %}
+}
+
+/* static */
+{{ cls.cppClassName }}
+{% if not cls.isMultipleApply %}
+{{ cls.cppClassName }}::Apply(const UsdPrim &prim)
+{% else %}
+{{ cls.cppClassName }}::Apply(const UsdPrim &prim, const TfToken &name)
+{% endif %}
+{
+{% if cls.isMultipleApply %}
+ if (prim.ApplyAPI<{{ cls.cppClassName }}>(name)) {
+ return {{ cls.cppClassName }}(prim, name);
+ }
+{% else %}
+ if (prim.ApplyAPI<{{ cls.cppClassName }}>()) {
+ return {{ cls.cppClassName }}(prim);
+ }
+{% endif %}
+ return {{ cls.cppClassName }}();
+}
+{% endif %}
+
+/* static */
+const TfType &
+{{ cls.cppClassName }}::_GetStaticTfType()
+{
+ static TfType tfType = TfType::Find<{{ cls.cppClassName }}>();
+ return tfType;
+}
+
+/* static */
+bool
+{{ cls.cppClassName }}::_IsTypedSchema()
+{
+    static bool isTyped = _GetStaticTfType().IsA<UsdTyped>();
+ return isTyped;
+}
+
+/* virtual */
+const TfType &
+{{ cls.cppClassName }}::_GetTfType() const
+{
+ return _GetStaticTfType();
+}
+{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
+
+/// Returns the property name prefixed with the correct namespace prefix, which
+/// is composed of the API's propertyNamespacePrefix metadata and the
+/// instance name of the API.
+static inline
+TfToken
+_GetNamespacedPropertyName(const TfToken instanceName, const TfToken propName)
+{
+ return UsdSchemaRegistry::MakeMultipleApplyNameInstance(propName, instanceName);
+}
+{% endif %}
+
+{% for attrName in cls.attrOrder %}
+{% set attr = cls.attrs[attrName] %}
+{# Only emit Create/Get API and doxygen if apiName is not empty string. #}
+{% if attr.apiName != '' %}
+{% if attr.apiGet != "custom" %}
+UsdAttribute
+{{ cls.cppClassName }}::Get{{ Proper(attr.apiName) }}Attr() const
+{
+{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
+ return GetPrim().GetAttribute(
+ _GetNamespacedPropertyName(
+ GetName(),
+ {{ tokensPrefix }}Tokens->{{ attr.name }}));
+{% else %}
+ return GetPrim().GetAttribute({{ tokensPrefix }}Tokens->{{ attr.name }});
+{% endif %}
+}
+{% endif %}
+
+UsdAttribute
+{{ cls.cppClassName }}::Create{{ Proper(attr.apiName) }}Attr(VtValue const &defaultValue, bool writeSparsely) const
+{
+{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
+ return UsdSchemaBase::_CreateAttr(
+ _GetNamespacedPropertyName(
+ GetName(),
+ {{ tokensPrefix }}Tokens->{{ attr.name }}),
+{% else %}
+ return UsdSchemaBase::_CreateAttr({{ tokensPrefix }}Tokens->{{ attr.name }},
+{% endif %}
+ {{ attr.usdType }},
+ /* custom = */ {{ "true" if attr.custom else "false" }},
+ {{ attr.variability }},
+ defaultValue,
+ writeSparsely);
+}
+
+{% endif %}
+{% endfor %}
+{% for relName in cls.relOrder %}
+{% set rel = cls.rels[relName] %}
+{# Only emit Create/Get API and doxygen if apiName is not empty string. #}
+{% if rel.apiName != '' %}
+{% if rel.apiGet != "custom" %}
+UsdRelationship
+{{ cls.cppClassName }}::Get{{ Proper(rel.apiName) }}Rel() const
+{
+{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
+ return GetPrim().GetRelationship(
+ _GetNamespacedPropertyName(
+ GetName(),
+ {{ tokensPrefix }}Tokens->{{ rel.name }}));
+{% else %}
+ return GetPrim().GetRelationship({{ tokensPrefix }}Tokens->{{ rel.name }});
+{% endif %}
+}
+{% endif %}
+
+UsdRelationship
+{{ cls.cppClassName }}::Create{{ Proper(rel.apiName) }}Rel() const
+{
+{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
+ return GetPrim().CreateRelationship(
+ _GetNamespacedPropertyName(
+ GetName(),
+ {{ tokensPrefix }}Tokens->{{ rel.name }}),
+{% else %}
+ return GetPrim().CreateRelationship({{ tokensPrefix }}Tokens->{{rel.name}},
+{% endif %}
+ /* custom = */ {{ "true" if rel.custom else "false" }});
+}
+
+{% endif %}
+{% endfor %}
+{% if cls.attrOrder|length > 0 %}
+namespace {
+static inline TfTokenVector
+_ConcatenateAttributeNames(const TfTokenVector& left,const TfTokenVector& right)
+{
+ TfTokenVector result;
+ result.reserve(left.size() + right.size());
+ result.insert(result.end(), left.begin(), left.end());
+ result.insert(result.end(), right.begin(), right.end());
+ return result;
+}
+}
+
+{% endif %}
+/*static*/
+const TfTokenVector&
+{{ cls.cppClassName }}::GetSchemaAttributeNames(bool includeInherited)
+{
+{% if cls.attrOrder|length > 0 %}
+ static TfTokenVector localNames = {
+{% for attrName in cls.attrOrder %}
+{% set attr = cls.attrs[attrName] %}
+{% if attr.apiName != '' %}
+ {{ tokensPrefix }}Tokens->{{ attr.name }},
+{% endif %}
+{% endfor %}
+ };
+ static TfTokenVector allNames =
+ _ConcatenateAttributeNames(
+ {{ cls.parentCppClassName }}::GetSchemaAttributeNames(true),
+ localNames);
+{% else %}
+ static TfTokenVector localNames;
+ static TfTokenVector allNames =
+ {{ cls.parentCppClassName }}::GetSchemaAttributeNames(true);
+{% endif %}
+
+ if (includeInherited)
+ return allNames;
+ else
+ return localNames;
+}
+
+{% if cls.isMultipleApply %}
+/*static*/
+TfTokenVector
+{{ cls.cppClassName }}::GetSchemaAttributeNames(
+ bool includeInherited, const TfToken &instanceName)
+{
+ const TfTokenVector &attrNames = GetSchemaAttributeNames(includeInherited);
+ if (instanceName.IsEmpty()) {
+ return attrNames;
+ }
+ TfTokenVector result;
+ result.reserve(attrNames.size());
+ for (const TfToken &attrName : attrNames) {
+ result.push_back(
+ UsdSchemaRegistry::MakeMultipleApplyNameInstance(attrName, instanceName));
+ }
+ return result;
+}
+
+{% endif %}
+{% if useExportAPI %}
+{{ namespaceClose }}
+
+{% endif %}
+// ===================================================================== //
+// Feel free to add custom code below this line. It will be preserved by
+// the code generator.
+{% if useExportAPI %}
+//
+// Just remember to wrap code in the appropriate delimiters:
+// '{{ namespaceOpen }}', '{{ namespaceClose }}'.
+{% endif %}
+// ===================================================================== //
+// --(BEGIN CUSTOM CODE)--
+
+//
+// Copyright 2016 Pixar
+//
+// Licensed under the Apache License, Version 2.0 (the "Apache License")
+// with the following modification; you may not use this file except in
+// compliance with the Apache License and the following modification to it:
+// Section 6. Trademarks. is deleted and replaced with:
+//
+// 6. Trademarks. This License does not grant permission to use the trade
+// names, trademarks, service marks, or product names of the Licensor
+// and its affiliates, except as required to comply with Section 4(c) of
+// the License and to reproduce the content of the NOTICE file.
+//
+// You may obtain a copy of the Apache License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the Apache License with the above modification is
+// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the Apache License for the specific
+// language governing permissions and limitations under the Apache License.
+//
+#ifndef {{ Upper(libraryName) }}_GENERATED_{{ Upper(cls.className) }}_H
+#define {{ Upper(libraryName) }}_GENERATED_{{ Upper(cls.className) }}_H
+
+/// \file {{ libraryName }}/{{ cls.GetHeaderFile() }}
+
+{% if useExportAPI %}
+#include "pxr/pxr.h"
+#include "{{ libraryPath }}/api.h"
+{% endif %}
+#include "{{ cls.parentLibPath }}/{{ cls.GetParentHeaderFile() }}"
+#include "pxr/usd/usd/prim.h"
+#include "pxr/usd/usd/stage.h"
+{% if cls.tokens -%}
+#include "{{ libraryPath }}/tokens.h"
+{% endif %}
+{% if cls.extraIncludes -%}
+{{ cls.extraIncludes }}
+{% endif %}
+
+#include "pxr/base/vt/value.h"
+
+#include "pxr/base/gf/vec3d.h"
+#include "pxr/base/gf/vec3f.h"
+#include "pxr/base/gf/matrix4d.h"
+
+#include "pxr/base/tf/token.h"
+#include "pxr/base/tf/type.h"
+
+{% if useExportAPI %}
+{{ namespaceOpen }}
+
+{% endif %}
+class SdfAssetPath;
+
+// -------------------------------------------------------------------------- //
+// {{ Upper(cls.usdPrimTypeName) }}{{' ' * (74 - cls.usdPrimTypeName|count)}} //
+// -------------------------------------------------------------------------- //
+
+/// \class {{ cls.cppClassName }}
+///
+{% if cls.doc -%}
+/// {{ cls.doc }}
+{% endif %}
+{% if cls.doc and hasTokenAttrs -%}
+///
+{%endif%}
+{% if hasTokenAttrs -%}
+/// For any described attribute \em Fallback \em Value or \em Allowed \em Values below
+/// that are text/tokens, the actual token is published and defined in \ref {{ tokensPrefix }}Tokens.
+/// So to set an attribute to the value "rightHanded", use {{ tokensPrefix }}Tokens->rightHanded
+/// as the value.
+{% endif %}
+///
+class {{ cls.cppClassName }} : public {{ cls.parentCppClassName }}
+{
+public:
+ /// Compile time constant representing what kind of schema this class is.
+ ///
+ /// \sa UsdSchemaKind
+ static const UsdSchemaKind schemaKind = {{cls.schemaKindEnumValue }};
+
+{% if cls.isMultipleApply %}
+ /// Construct a {{ cls.cppClassName }} on UsdPrim \p prim with
+ /// name \p name . Equivalent to
+ /// {{ cls.cppClassName }}::Get(
+ /// prim.GetStage(),
+ /// prim.GetPath().AppendProperty(
+ /// "{{ cls.propertyNamespacePrefix }}:name"));
+ ///
+ /// for a \em valid \p prim, but will not immediately throw an error for
+ /// an invalid \p prim
+ explicit {{ cls.cppClassName }}(
+ const UsdPrim& prim=UsdPrim(), const TfToken &name=TfToken())
+ : {{ cls.parentCppClassName }}(prim, /*instanceName*/ name)
+ { }
+
+ /// Construct a {{ cls.cppClassName }} on the prim held by \p schemaObj with
+ /// name \p name. Should be preferred over
+ /// {{ cls.cppClassName }}(schemaObj.GetPrim(), name), as it preserves
+ /// SchemaBase state.
+ explicit {{ cls.cppClassName }}(
+ const UsdSchemaBase& schemaObj, const TfToken &name)
+ : {{ cls.parentCppClassName }}(schemaObj, /*instanceName*/ name)
+ { }
+{% else %}
+ /// Construct a {{ cls.cppClassName }} on UsdPrim \p prim .
+ /// Equivalent to {{ cls.cppClassName }}::Get(prim.GetStage(), prim.GetPath())
+ /// for a \em valid \p prim, but will not immediately throw an error for
+ /// an invalid \p prim
+ explicit {{ cls.cppClassName }}(const UsdPrim& prim=UsdPrim())
+ : {{ cls.parentCppClassName }}(prim)
+ {
+ }
+
+ /// Construct a {{ cls.cppClassName }} on the prim held by \p schemaObj .
+ /// Should be preferred over {{ cls.cppClassName }}(schemaObj.GetPrim()),
+ /// as it preserves SchemaBase state.
+ explicit {{ cls.cppClassName }}(const UsdSchemaBase& schemaObj)
+ : {{ cls.parentCppClassName }}(schemaObj)
+ {
+ }
+{% endif %}
+
+ /// Destructor.
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ virtual ~{{ cls.cppClassName }}() {%- if cls.isAPISchemaBase %} = 0{% endif %};
+
+ /// Return a vector of names of all pre-declared attributes for this schema
+ /// class and all its ancestor classes. Does not include attributes that
+ /// may be authored by custom/extended methods of the schemas involved.
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ static const TfTokenVector &
+ GetSchemaAttributeNames(bool includeInherited=true);
+{% if cls.isMultipleApply %}
+
+ /// Return a vector of names of all pre-declared attributes for this schema
+ /// class and all its ancestor classes for a given instance name. Does not
+ /// include attributes that may be authored by custom/extended methods of
+ /// the schemas involved. The names returned will have the proper namespace
+ /// prefix.
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ static TfTokenVector
+ GetSchemaAttributeNames(bool includeInherited, const TfToken &instanceName);
+
+ /// Returns the name of this multiple-apply schema instance
+ TfToken GetName() const {
+ return _GetInstanceName();
+ }
+{% endif %}
+{% if not cls.isAPISchemaBase %}
+
+ /// Return a {{ cls.cppClassName }} holding the prim adhering to this
+ /// schema at \p path on \p stage. If no prim exists at \p path on
+ /// \p stage, or if the prim at that path does not adhere to this schema,
+{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
+ /// return an invalid schema object. \p path must be of the format
+ /// .{{ cls.propertyNamespacePrefix }}:name .
+ ///
+ /// This is shorthand for the following:
+ ///
+ /// \code
+ /// TfToken name = SdfPath::StripNamespace(path.GetToken());
+ /// {{ cls.cppClassName }}(
+ /// stage->GetPrimAtPath(path.GetPrimPath()), name);
+ /// \endcode
+{% else %}
+ /// return an invalid schema object. This is shorthand for the following:
+ ///
+ /// \code
+ /// {{ cls.cppClassName }}(stage->GetPrimAtPath(path));
+ /// \endcode
+{% endif %}
+ ///
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ static {{ cls.cppClassName }}
+ Get(const UsdStagePtr &stage, const SdfPath &path);
+{% if cls.isMultipleApply %}
+
+ /// Return a {{ cls.cppClassName }} with name \p name holding the
+ /// prim \p prim. Shorthand for {{ cls.cppClassName }}(prim, name);
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ static {{ cls.cppClassName }}
+ Get(const UsdPrim &prim, const TfToken &name);
+
+ /// Return a vector of all named instances of {{ cls.cppClassName }} on the
+ /// given \p prim.
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ static std::vector<{{ cls.cppClassName }}>
+ GetAll(const UsdPrim &prim);
+{% endif %}
+{% endif %}
+
+{% if cls.isConcrete %}
+ /// Attempt to ensure a \a UsdPrim adhering to this schema at \p path
+ /// is defined (according to UsdPrim::IsDefined()) on this stage.
+ ///
+ /// If a prim adhering to this schema at \p path is already defined on this
+ /// stage, return that prim. Otherwise author an \a SdfPrimSpec with
+ /// \a specifier == \a SdfSpecifierDef and this schema's prim type name for
+ /// the prim at \p path at the current EditTarget. Author \a SdfPrimSpec s
+ /// with \p specifier == \a SdfSpecifierDef and empty typeName at the
+ /// current EditTarget for any nonexistent, or existing but not \a Defined
+ /// ancestors.
+ ///
+ /// The given \a path must be an absolute prim path that does not contain
+ /// any variant selections.
+ ///
+ /// If it is impossible to author any of the necessary PrimSpecs, (for
+ /// example, in case \a path cannot map to the current UsdEditTarget's
+ /// namespace) issue an error and return an invalid \a UsdPrim.
+ ///
+ /// Note that this method may return a defined prim whose typeName does not
+ /// specify this schema class, in case a stronger typeName opinion overrides
+ /// the opinion at the current EditTarget.
+ ///
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ static {{ cls.cppClassName }}
+ Define(const UsdStagePtr &stage, const SdfPath &path);
+{% endif %}
+{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
+ /// Checks if the given name \p baseName is the base name of a property
+ /// of {{ cls.usdPrimTypeName }}.
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ static bool
+ IsSchemaPropertyBaseName(const TfToken &baseName);
+
+ /// Checks if the given path \p path is of an API schema of type
+ /// {{ cls.usdPrimTypeName }}. If so, it stores the instance name of
+ /// the schema in \p name and returns true. Otherwise, it returns false.
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ static bool
+ Is{{ cls.usdPrimTypeName }}Path(const SdfPath &path, TfToken *name);
+{% endif %}
+{% if cls.isAppliedAPISchema and not cls.isMultipleApply %}
+
+ /// Returns true if this single-apply API schema can be applied to
+/// the given \p prim. If this schema cannot be applied to the prim,
+ /// this returns false and, if provided, populates \p whyNot with the
+ /// reason it can not be applied.
+ ///
+ /// Note that if CanApply returns false, that does not necessarily imply
+ /// that calling Apply will fail. Callers are expected to call CanApply
+ /// before calling Apply if they want to ensure that it is valid to
+ /// apply a schema.
+ ///
+ /// \sa UsdPrim::GetAppliedSchemas()
+ /// \sa UsdPrim::HasAPI()
+ /// \sa UsdPrim::CanApplyAPI()
+ /// \sa UsdPrim::ApplyAPI()
+ /// \sa UsdPrim::RemoveAPI()
+ ///
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ static bool
+ CanApply(const UsdPrim &prim, std::string *whyNot=nullptr);
+
+ /// Applies this single-apply API schema to the given \p prim.
+ /// This information is stored by adding "{{ cls.primName }}" to the
+ /// token-valued, listOp metadata \em apiSchemas on the prim.
+ ///
+ /// \return A valid {{ cls.cppClassName }} object is returned upon success.
+ /// An invalid (or empty) {{ cls.cppClassName }} object is returned upon
+ /// failure. See \ref UsdPrim::ApplyAPI() for conditions
+ /// resulting in failure.
+ ///
+ /// \sa UsdPrim::GetAppliedSchemas()
+ /// \sa UsdPrim::HasAPI()
+ /// \sa UsdPrim::CanApplyAPI()
+ /// \sa UsdPrim::ApplyAPI()
+ /// \sa UsdPrim::RemoveAPI()
+ ///
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ static {{ cls.cppClassName }}
+ Apply(const UsdPrim &prim);
+{% endif %}
+{% if cls.isAppliedAPISchema and cls.isMultipleApply %}
+
+ /// Returns true if this multiple-apply API schema can be applied,
+ /// with the given instance name, \p name, to the given \p prim. If this
+ /// schema can not be a applied the prim, this returns false and, if
+ /// provided, populates \p whyNot with the reason it can not be applied.
+ ///
+ /// Note that if CanApply returns false, that does not necessarily imply
+ /// that calling Apply will fail. Callers are expected to call CanApply
+ /// before calling Apply if they want to ensure that it is valid to
+ /// apply a schema.
+ ///
+ /// \sa UsdPrim::GetAppliedSchemas()
+ /// \sa UsdPrim::HasAPI()
+ /// \sa UsdPrim::CanApplyAPI()
+ /// \sa UsdPrim::ApplyAPI()
+ /// \sa UsdPrim::RemoveAPI()
+ ///
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ static bool
+ CanApply(const UsdPrim &prim, const TfToken &name,
+ std::string *whyNot=nullptr);
+
+ /// Applies this multiple-apply API schema to the given \p prim
+ /// along with the given instance name, \p name.
+ ///
+ /// This information is stored by adding "{{ cls.primName }}:name"
+ /// to the token-valued, listOp metadata \em apiSchemas on the prim.
+ /// For example, if \p name is 'instance1', the token
+ /// '{{ cls.primName }}:instance1' is added to 'apiSchemas'.
+ ///
+ /// \return A valid {{ cls.cppClassName }} object is returned upon success.
+ /// An invalid (or empty) {{ cls.cppClassName }} object is returned upon
+ /// failure. See \ref UsdPrim::ApplyAPI() for
+ /// conditions resulting in failure.
+ ///
+ /// \sa UsdPrim::GetAppliedSchemas()
+ /// \sa UsdPrim::HasAPI()
+ /// \sa UsdPrim::CanApplyAPI()
+ /// \sa UsdPrim::ApplyAPI()
+ /// \sa UsdPrim::RemoveAPI()
+ ///
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ static {{ cls.cppClassName }}
+ Apply(const UsdPrim &prim, const TfToken &name);
+{% endif %}
+
+protected:
+ /// Returns the kind of schema this class belongs to.
+ ///
+ /// \sa UsdSchemaKind
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ UsdSchemaKind _GetSchemaKind() const override;
+
+private:
+ // needs to invoke _GetStaticTfType.
+ friend class UsdSchemaRegistry;
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ static const TfType &_GetStaticTfType();
+
+ static bool _IsTypedSchema();
+
+ // override SchemaBase virtuals.
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ const TfType &_GetTfType() const override;
+
+{% for attrName in cls.attrOrder %}
+{% set attr = cls.attrs[attrName]%}
+{# Only emit Create/Get API and doxygen if apiName is not empty string. #}
+{% if attr.apiName != '' %}
+public:
+ // --------------------------------------------------------------------- //
+ // {{ Upper(attr.apiName) }}
+ // --------------------------------------------------------------------- //
+ /// {{ attr.doc }}
+ ///
+{% if attr.details %}
+ /// | ||
+ /// | -- | -- |
+{% for detail in attr.details %}
+ /// | {{ detail[0] }} | {{ detail[1] }} |
+{% endfor %}
+{% endif %}
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ UsdAttribute Get{{ Proper(attr.apiName) }}Attr() const;
+
+ /// See Get{{ Proper(attr.apiName) }}Attr(), and also
+ /// \ref Usd_Create_Or_Get_Property for when to use Get vs Create.
+ /// If specified, author \p defaultValue as the attribute's default,
+ /// sparsely (when it makes sense to do so) if \p writeSparsely is \c true -
+ /// the default for \p writeSparsely is \c false.
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ UsdAttribute Create{{ Proper(attr.apiName) }}Attr(VtValue const &defaultValue = VtValue(), bool writeSparsely=false) const;
+
+{% endif %}
+{% endfor %}
+{% for relName in cls.relOrder %}
+{% set rel = cls.rels[relName]%}
+{# Only emit Create/Get API and doxygen if apiName is not empty string. #}
+{% if rel.apiName != '' %}
+public:
+ // --------------------------------------------------------------------- //
+ // {{ Upper(rel.apiName) }}
+ // --------------------------------------------------------------------- //
+ /// {{ rel.doc }}
+ ///
+{% for detail in rel.details %}
+ /// \n {{ detail[0] }}: {{ detail[1] }}
+{% endfor %}
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ UsdRelationship Get{{ Proper(rel.apiName) }}Rel() const;
+
+ /// See Get{{ Proper(rel.apiName) }}Rel(), and also
+ /// \ref Usd_Create_Or_Get_Property for when to use Get vs Create
+ {% if useExportAPI -%}
+ {{ Upper(libraryName) }}_API
+ {% endif -%}
+ UsdRelationship Create{{ Proper(rel.apiName) }}Rel() const;
+{% endif %}
+
+{% endfor %}
+public:
+ // ===================================================================== //
+ // Feel free to add custom code below this line, it will be preserved by
+ // the code generator.
+ //
+ // Just remember to:
+ // - Close the class declaration with };
+{% if useExportAPI %}
+ // - Close the namespace with {{ namespaceClose }}
+{% endif %}
+ // - Close the include guard with #endif
+ // ===================================================================== //
+ // --(BEGIN CUSTOM CODE)--
+
+//
+// Copyright 2016 Pixar
+//
+// Licensed under the Apache License, Version 2.0 (the "Apache License")
+// with the following modification; you may not use this file except in
+// compliance with the Apache License and the following modification to it:
+// Section 6. Trademarks. is deleted and replaced with:
+//
+// 6. Trademarks. This License does not grant permission to use the trade
+// names, trademarks, service marks, or product names of the Licensor
+// and its affiliates, except as required to comply with Section 4(c) of
+// the License and to reproduce the content of the NOTICE file.
+//
+// You may obtain a copy of the Apache License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the Apache License with the above modification is
+// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the Apache License for the specific
+// language governing permissions and limitations under the Apache License.
+//
+#include "{{ libraryPath }}/tokens.h"
+
+{% if useExportAPI %}
+{{ namespaceOpen }}
+
+{% endif %}
+{{ tokensPrefix }}TokensType::{{ tokensPrefix }}TokensType() :
+{% for token in tokens %}
+ {{ token.id }}("{{ token.value }}", TfToken::Immortal),
+{% endfor %}
+ allTokens({
+{% for token in tokens %}
+ {{ token.id }}{% if not loop.last %},{% endif %}
+
+{% endfor %}
+ })
+{
+}
+
+TfStaticData<{{ tokensPrefix }}TokensType> {{ tokensPrefix }}Tokens;
+{% if useExportAPI %}
+
+{{ namespaceClose }}
+{% endif %}
+//
+// Copyright 2016 Pixar
+//
+// Licensed under the Apache License, Version 2.0 (the "Apache License")
+// with the following modification; you may not use this file except in
+// compliance with the Apache License and the following modification to it:
+// Section 6. Trademarks. is deleted and replaced with:
+//
+// 6. Trademarks. This License does not grant permission to use the trade
+// names, trademarks, service marks, or product names of the Licensor
+// and its affiliates, except as required to comply with Section 4(c) of
+// the License and to reproduce the content of the NOTICE file.
+//
+// You may obtain a copy of the Apache License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the Apache License with the above modification is
+// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the Apache License for the specific
+// language governing permissions and limitations under the Apache License.
+//
+#ifndef {{ Upper(tokensPrefix) }}_TOKENS_H
+#define {{ Upper(tokensPrefix) }}_TOKENS_H
+
+/// \file {{ libraryName }}/tokens.h
+
+// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+//
+// This is an automatically generated file (by usdGenSchema.py).
+// Do not hand-edit!
+//
+// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+
+{% if useExportAPI %}
+#include "pxr/pxr.h"
+#include "{{ libraryPath }}/api.h"
+{% endif %}
+#include "pxr/base/tf/staticData.h"
+#include "pxr/base/tf/token.h"
+#include <vector>
+
+{% if useExportAPI %}
+{{ namespaceOpen }}
+
+{% endif %}
+
+/// \class {{ tokensPrefix }}TokensType
+///
+/// \link {{ tokensPrefix }}Tokens \endlink provides static, efficient
+/// \link TfToken TfTokens\endlink for use in all public USD API.
+///
+/// These tokens are auto-generated from the module's schema, representing
+/// property names, for when you need to fetch an attribute or relationship
+/// directly by name, e.g. UsdPrim::GetAttribute(), in the most efficient
+/// manner, and allow the compiler to verify that you spelled the name
+/// correctly.
+///
+/// {{ tokensPrefix }}Tokens also contains all of the \em allowedTokens values
+/// declared for schema builtin attributes of 'token' scene description type.
+{% if tokens %}
+/// Use {{ tokensPrefix }}Tokens like so:
+///
+/// \code
+/// gprim.GetMyTokenValuedAttr().Set({{ tokensPrefix }}Tokens->{{ tokens[0].id }});
+/// \endcode
+{% endif %}
+struct {{ tokensPrefix }}TokensType {
+ {% if useExportAPI %}{{ Upper(libraryName) }}_API {% endif %}{{ tokensPrefix }}TokensType();
+{% for token in tokens %}
+ /// \brief "{{ token.value }}"
+ ///
+ /// {{ token.desc }}
+ const TfToken {{ token.id }};
+{% endfor %}
+ /// A vector of all of the tokens listed above.
+    const std::vector<TfToken> allTokens;
+};
+
+/// \var {{ tokensPrefix }}Tokens
+///
+/// A global variable with static, efficient \link TfToken TfTokens\endlink
+/// for use in all public USD API. \sa {{ tokensPrefix }}TokensType
+extern{% if useExportAPI %} {{ Upper(libraryName) }}_API{% endif %} TfStaticData<{{ tokensPrefix }}TokensType> {{ tokensPrefix }}Tokens;
+{% if useExportAPI %}
+
+{{ namespaceClose }}
+{% endif %}
+
+#endif
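For reference, the generated tokens are also wrapped for Python by the wrapTokens template further below, where each token is exposed as a plain string. Below is a minimal sketch of consuming them from Python, assuming the standard `pxr` Python modules and a library (here `UsdGeom`) generated from these templates; the prim path and attribute choice are illustrative only.

```
# Minimal sketch: consuming generated tokens from Python.
# Assumes the standard pxr modules; prim path and values are illustrative.
from pxr import Usd, UsdGeom

stage = Usd.Stage.CreateInMemory()
sphere = UsdGeom.Sphere.Define(stage, "/Sphere")

# Each generated token is exposed to Python as a plain string.
print(UsdGeom.Tokens.purpose)                          # "purpose"

# Tokens double as the allowedTokens values for token-valued attributes.
sphere.CreatePurposeAttr().Set(UsdGeom.Tokens.render)
```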
+//
+// Copyright 2016 Pixar
+//
+// Licensed under the Apache License, Version 2.0 (the "Apache License")
+// with the following modification; you may not use this file except in
+// compliance with the Apache License and the following modification to it:
+// Section 6. Trademarks. is deleted and replaced with:
+//
+// 6. Trademarks. This License does not grant permission to use the trade
+// names, trademarks, service marks, or product names of the Licensor
+// and its affiliates, except as required to comply with Section 4(c) of
+// the License and to reproduce the content of the NOTICE file.
+//
+// You may obtain a copy of the Apache License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the Apache License with the above modification is
+// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the Apache License for the specific
+// language governing permissions and limitations under the Apache License.
+//
+#include "{{ libraryPath }}/{{ cls.GetHeaderFile() }}"
+#include "pxr/usd/usd/schemaBase.h"
+
+#include "pxr/usd/sdf/primSpec.h"
+
+#include "pxr/usd/usd/pyConversions.h"
+{% if cls.isAppliedAPISchema %}
+#include "pxr/base/tf/pyAnnotatedBoolResult.h"
+{% endif %}
+#include "pxr/base/tf/pyContainerConversions.h"
+#include "pxr/base/tf/pyResultConversions.h"
+#include "pxr/base/tf/pyUtils.h"
+#include "pxr/base/tf/wrapTypeHelpers.h"
+
+#include <boost/python.hpp>
+
+#include <string>
+
+using namespace boost::python;
+
+{% if useExportAPI %}
+{{ namespaceUsing }}
+
+namespace {
+
+{% endif %}
+#define WRAP_CUSTOM \
+    template <class Cls> static void _CustomWrapCode(Cls &_class)
+
+// fwd decl.
+WRAP_CUSTOM;
+
+{% for attrName in cls.attrOrder -%}
+{% set attr = cls.attrs[attrName] %}
+{# Only emit Create/Get API if apiName is not empty string. #}
+{% if attr.apiName != '' %}
+
+static UsdAttribute
+_Create{{ Proper(attr.apiName) }}Attr({{ cls.cppClassName }} &self,
+ object defaultVal, bool writeSparsely) {
+ return self.Create{{ Proper(attr.apiName) }}Attr(
+ UsdPythonToSdfType(defaultVal, {{ attr.usdType }}), writeSparsely);
+}
+{% endif %}
+{% endfor %}
+{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
+
+static bool _WrapIs{{ cls.usdPrimTypeName }}Path(const SdfPath &path) {
+ TfToken collectionName;
+ return {{ cls.cppClassName }}::Is{{ cls.usdPrimTypeName }}Path(
+ path, &collectionName);
+}
+{% endif %}
+{% if not cls.isAPISchemaBase %}
+
+static std::string
+_Repr(const {{ cls.cppClassName }} &self)
+{
+ std::string primRepr = TfPyRepr(self.GetPrim());
+{% if cls.isMultipleApply %}
+ std::string instanceName = TfPyRepr(self.GetName());
+ return TfStringPrintf(
+ "{{ libraryName[0]|upper }}{{ libraryName[1:] }}.{{ cls.className }}(%s, '%s')",
+ primRepr.c_str(), instanceName.c_str());
+{% else %}
+ return TfStringPrintf(
+ "{{ libraryName[0]|upper }}{{ libraryName[1:] }}.{{ cls.className }}(%s)",
+ primRepr.c_str());
+{% endif %}
+}
+{% endif %}
+{% if cls.isAppliedAPISchema %}
+
+struct {{ cls.cppClassName }}_CanApplyResult :
+    public TfPyAnnotatedBoolResult<std::string>
+{
+    {{ cls.cppClassName }}_CanApplyResult(bool val, std::string const &msg) :
+        TfPyAnnotatedBoolResult<std::string>(val, msg) {}
+};
+
+{% if cls.isMultipleApply %}
+static {{ cls.cppClassName }}_CanApplyResult
+_WrapCanApply(const UsdPrim& prim, const TfToken& name)
+{
+ std::string whyNot;
+ bool result = {{ cls.cppClassName }}::CanApply(prim, name, &whyNot);
+ return {{ cls.cppClassName }}_CanApplyResult(result, whyNot);
+}
+{% else %}
+static {{ cls.cppClassName }}_CanApplyResult
+_WrapCanApply(const UsdPrim& prim)
+{
+ std::string whyNot;
+ bool result = {{ cls.cppClassName }}::CanApply(prim, &whyNot);
+ return {{ cls.cppClassName }}_CanApplyResult(result, whyNot);
+}
+{% endif %}
+{% endif %}
+{% if useExportAPI %}
+
+} // anonymous namespace
+{% endif %}
+
+void wrap{{ cls.cppClassName }}()
+{
+ typedef {{ cls.cppClassName }} This;
+
+{% if cls.isAppliedAPISchema %}
+ {{ cls.cppClassName }}_CanApplyResult::Wrap<{{ cls.cppClassName }}_CanApplyResult>(
+ "_CanApplyResult", "whyNot");
+
+{% endif %}
+{% if cls.isAPISchemaBase %}
+ class_< This , bases<{{ cls.parentCppClassName }}>, boost::noncopyable> cls ("APISchemaBase", "", no_init);
+{% else %}
+    class_<This, bases<{{ cls.parentCppClassName }}> >
+ cls("{{ cls.className }}");
+{% endif %}
+
+ cls
+{% if not cls.isAPISchemaBase %}
+{% if cls.isMultipleApply %}
+        .def(init<UsdPrim, TfToken>((arg("prim"), arg("name"))))
+        .def(init<UsdSchemaBase const&, TfToken>((arg("schemaObj"), arg("name"))))
+{% else %}
+        .def(init<UsdPrim>(arg("prim")))
+        .def(init<UsdSchemaBase const&>(arg("schemaObj")))
+{% endif %}
+{% endif %}
+ .def(TfTypePythonClass())
+
+{% if not cls.isAPISchemaBase %}
+{% if cls.isMultipleApply %}
+ .def("Get",
+ ({{ cls.cppClassName }}(*)(const UsdStagePtr &stage,
+ const SdfPath &path))
+ &This::Get,
+ (arg("stage"), arg("path")))
+ .def("Get",
+ ({{ cls.cppClassName }}(*)(const UsdPrim &prim,
+ const TfToken &name))
+ &This::Get,
+ (arg("prim"), arg("name")))
+{% else %}
+ .def("Get", &This::Get, (arg("stage"), arg("path")))
+{% endif %}
+ .staticmethod("Get")
+{% endif %}
+{% if cls.isMultipleApply %}
+
+ .def("GetAll",
+ (std::vector<{{ cls.cppClassName }}>(*)(const UsdPrim &prim))
+ &This::GetAll,
+ arg("prim"),
+            return_value_policy<TfPySequenceToList>())
+ .staticmethod("GetAll")
+{% endif %}
+{% if cls.isConcrete %}
+
+ .def("Define", &This::Define, (arg("stage"), arg("path")))
+ .staticmethod("Define")
+{% endif %}
+{% if cls.isAppliedAPISchema and not cls.isMultipleApply %}
+
+ .def("CanApply", &_WrapCanApply, (arg("prim")))
+ .staticmethod("CanApply")
+{% endif %}
+{% if cls.isAppliedAPISchema and cls.isMultipleApply %}
+
+ .def("CanApply", &_WrapCanApply, (arg("prim"), arg("name")))
+ .staticmethod("CanApply")
+{% endif %}
+{% if cls.isAppliedAPISchema and not cls.isMultipleApply %}
+
+ .def("Apply", &This::Apply, (arg("prim")))
+ .staticmethod("Apply")
+{% endif %}
+{% if cls.isAppliedAPISchema and cls.isMultipleApply %}
+
+ .def("Apply", &This::Apply, (arg("prim"), arg("name")))
+ .staticmethod("Apply")
+{% endif %}
+
+{% if cls.isMultipleApply %}
+ .def("GetSchemaAttributeNames",
+ (const TfTokenVector &(*)(bool))&This::GetSchemaAttributeNames,
+ arg("includeInherited")=true,
+             return_value_policy<TfPySequenceToList>())
+ .def("GetSchemaAttributeNames",
+ (TfTokenVector(*)(bool, const TfToken &))
+ &This::GetSchemaAttributeNames,
+ arg("includeInherited"),
+ arg("instanceName"),
+             return_value_policy<TfPySequenceToList>())
+{% else %}
+ .def("GetSchemaAttributeNames",
+ &This::GetSchemaAttributeNames,
+ arg("includeInherited")=true,
+             return_value_policy<TfPySequenceToList>())
+{% endif %}
+ .staticmethod("GetSchemaAttributeNames")
+
+        .def("_GetStaticTfType", (TfType const &(*)()) TfType::Find<This>,
+             return_value_policy<return_by_value>())
+ .staticmethod("_GetStaticTfType")
+
+ .def(!self)
+
+{% for attrName in cls.attrOrder -%}
+{% set attr = cls.attrs[attrName] %}
+{# Only emit Create/Get API if apiName is not empty string. #}
+{% if attr.apiName != '' %}
+
+ .def("Get{{ Proper(attr.apiName) }}Attr",
+ &This::Get{{ Proper(attr.apiName) }}Attr)
+ .def("Create{{ Proper(attr.apiName) }}Attr",
+ &_Create{{ Proper(attr.apiName) }}Attr,
+ (arg("defaultValue")=object(),
+ arg("writeSparsely")=false))
+{% endif %}
+{% endfor %}
+
+{% for relName in cls.relOrder -%}
+{# Only emit Create/Get API and doxygen if apiName is not empty string. #}
+{% set rel = cls.rels[relName] %}
+{% if rel.apiName != '' %}
+
+ .def("Get{{ Proper(rel.apiName) }}Rel",
+ &This::Get{{ Proper(rel.apiName) }}Rel)
+ .def("Create{{ Proper(rel.apiName) }}Rel",
+ &This::Create{{ Proper(rel.apiName) }}Rel)
+{% endif %}
+{% endfor %}
+{% if cls.isMultipleApply and cls.propertyNamespacePrefix %}
+ .def("Is{{ cls.usdPrimTypeName }}Path", _WrapIs{{ cls.usdPrimTypeName }}Path)
+ .staticmethod("Is{{ cls.usdPrimTypeName }}Path")
+{% endif %}
+{% if not cls.isAPISchemaBase %}
+ .def("__repr__", ::_Repr)
+{% endif %}
+ ;
+
+ _CustomWrapCode(cls);
+}
+
+// ===================================================================== //
+// Feel free to add custom code below this line, it will be preserved by
+// the code generator. The entry point for your custom code should look
+// minimally like the following:
+//
+// WRAP_CUSTOM {
+// _class
+// .def("MyCustomMethod", ...)
+// ;
+// }
+//
+// Of course any other ancillary or support code may be provided.
+{% if useExportAPI %}
+//
+// Just remember to wrap code in the appropriate delimiters:
+// 'namespace {', '}'.
+//
+{% endif %}
+// ===================================================================== //
+// --(BEGIN CUSTOM CODE)--
+
+//
+// Copyright 2016 Pixar
+//
+// Licensed under the Apache License, Version 2.0 (the "Apache License")
+// with the following modification; you may not use this file except in
+// compliance with the Apache License and the following modification to it:
+// Section 6. Trademarks. is deleted and replaced with:
+//
+// 6. Trademarks. This License does not grant permission to use the trade
+// names, trademarks, service marks, or product names of the Licensor
+// and its affiliates, except as required to comply with Section 4(c) of
+// the License and to reproduce the content of the NOTICE file.
+//
+// You may obtain a copy of the Apache License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the Apache License with the above modification is
+// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the Apache License for the specific
+// language governing permissions and limitations under the Apache License.
+//
+// GENERATED FILE. DO NOT EDIT.
+#include <boost/python/class.hpp>
+#include "{{ libraryPath }}/tokens.h"
+
+{% if useExportAPI %}
+{{ namespaceUsing }}
+
+{% endif %}
+namespace {
+
+// Helper to return a static token as a string. We wrap tokens as Python
+// strings and for some reason simply wrapping the token using def_readonly
+// bypasses to-Python conversion, leading to the error that there's no
+// Python type for the C++ TfToken type. So we wrap this functor instead.
+class _WrapStaticToken {
+public:
+ _WrapStaticToken(const TfToken* token) : _token(token) { }
+
+ std::string operator()() const
+ {
+ return _token->GetString();
+ }
+
+private:
+ const TfToken* _token;
+};
+
+template <typename T>
+void
+_AddToken(T& cls, const char* name, const TfToken& token)
+{
+ cls.add_static_property(name,
+ boost::python::make_function(
+ _WrapStaticToken(&token),
+ boost::python::return_value_policy<
+ boost::python::return_by_value>(),
+                                boost::mpl::vector1<std::string>()));
+}
+
+} // anonymous
+
+void wrap{{ tokensPrefix }}Tokens()
+{
+ boost::python::class_<{{ tokensPrefix }}TokensType, boost::noncopyable>
+ cls("Tokens", boost::python::no_init);
+{% for token in tokens %}
+ _AddToken(cls, "{{ token.id }}", {{ tokensPrefix }}Tokens->{{ token.id }});
+{% endfor %}
+}
+#usda 1.0
+(
+ "WARNING: THIS FILE IS GENERATED BY usdGenSchema. DO NOT EDIT."
+)
+
+class "Typed" (
+ doc = '''The base class for all typed schemas (those that can impart a
+ typeName to a UsdPrim), and therefore the base class for all
+ concrete, instantiable "IsA" schemas.
+
+ UsdTyped implements a typeName-based query for its override of
+ UsdSchemaBase::_IsCompatible(). It provides no other behavior.'''
+)
+{
+}
+
+class "APISchemaBase" (
+ doc = """The base class for all API schemas.
+
+ An API schema provides an interface to a prim's qualities, but does not
+ specify a typeName for the underlying prim. The prim's qualities include
+ its inheritance structure, attributes, relationships etc. Since it cannot
+ provide a typeName, an API schema is considered to be non-concrete.
+
+ To auto-generate an API schema using usdGenSchema, simply leave the
+ typeName empty and make it inherit from \"/APISchemaBase\" or from another
+ API schema. See UsdModelAPI, UsdClipsAPI and UsdCollectionAPI for examples.
+
+ API schemas are classified into applied and non-applied API schemas.
+ The author of an API schema has to decide on the type of API schema
+ at the time of its creation by setting customData['apiSchemaType'] in the
+ schema definition (i.e. in the associated primSpec inside the schema.usda
+ file). UsdAPISchemaBase implements methods that are used to record the
+ application of an API schema on a USD prim.
+
+ If an API schema only provides an interface to set certain core bits of
+ metadata (like UsdModelAPI, which sets model kind and UsdClipsAPI, which
+ sets clips-related metadata) OR if the API schema can apply to any type of
+ prim or only to a known fixed set of prim types OR if there is no use of
+ recording the application of the API schema, in such cases, it would be
+ better to make it a non-applied API schema. Examples of non-applied API
+ schemas include UsdModelAPI, UsdClipsAPI, UsdShadeConnectableAPI and
+ UsdGeomPrimvarsAPI.
+
+ If there is a need to discover (or record) whether a prim contains or
+ subscribes to a given API schema, it would be advantageous to make the API
+ schema be \"applied\". In general, API schemas that add one or more properties
+ to a prim should be tagged as applied API schemas. A public Apply() method
+ is generated for applied API schemas by usdGenSchema. An applied API schema
+ must be applied to a prim via a call to the generated Apply() method, for
+ the schema object to evaluate to true when converted to a bool using the
+ explicit bool conversion operator. Examples of applied API schemas include
+ UsdCollectionAPI, UsdGeomModelAPI and UsdGeomMotionAPI
+
+ \\anchor UsdAPISchemaBase_SingleVsMultipleApply
+ \\name Single vs. Multiple Apply API Schemas
+
+ Applied API schemas can further be classified into single-apply and
+ multiple-apply API schemas. As the name suggests, a single-apply API schema
+ can only be applied once to a prim. A multiple-apply API schema can be
+ applied multiple times with different 'instanceName' values. An example of
+ a multiple-apply API schema is UsdCollectionAPI, where the API schema is
+ applied to a prim once for every collection owned by the prim.
+
+ \\note An applied API schema can only inherit from another applied API
+ schema or directly from APISchemaBase. Similarly, a non-applied API schema
+ can only inherit from a non-applied API Schema or directly from
+ APISchemaBase. 'usdGenSchema' attempts to issue a warning if it detects
+ an incompatibility.
+
+ \\note A multiple-apply API schema may not inherit from a single-apply API
+ schema and vice versa.
+
+ \\note When the bool-conversion operator is invoked on an applied API
+ schema, it evaluates to true only if the application of the API schema has
+ been recorded on the prim via a call to the auto-generated Apply() method.
+
+ """
+)
+{
+}
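The CanApply()/Apply() workflow and the bool-conversion behavior described above can be exercised from Python. This is only a minimal sketch, assuming the standard `pxr` modules; `UsdGeom.MotionAPI` is used merely as a convenient single-apply example and the prim path is illustrative.

```
# Sketch of the applied-API-schema workflow described above.
# Assumes the standard pxr modules; UsdGeom.MotionAPI is just an example.
from pxr import Usd, UsdGeom

stage = Usd.Stage.CreateInMemory()
prim = stage.DefinePrim("/Asset", "Xform")

print(bool(UsdGeom.MotionAPI(prim)))       # False: not applied yet

result = UsdGeom.MotionAPI.CanApply(prim)  # annotated bool, see result.whyNot
if result:
    motion = UsdGeom.MotionAPI.Apply(prim)
    print(bool(motion))                    # True: application was recorded
    print(prim.GetAppliedSchemas())        # includes 'MotionAPI'
else:
    print(result.whyNot)
```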
+
+class "ModelAPI" (
+ doc = """UsdModelAPI is an API schema that provides an interface to a prim's
+ model qualities, if it does, in fact, represent the root prim of a model.
+
+ The first and foremost model quality is its kind, i.e. the metadata
+ that establishes it as a model (See KindRegistry). UsdModelAPI provides
+ various methods for setting and querying the prim's kind, as well as
+ queries (also available on UsdPrim) for asking what category of model
+ the prim is. See \"Kind and Model-ness\".
+
+ UsdModelAPI also provides access to a prim's \"assetInfo\"
+ data. While any prim can host assetInfo, it is common that published
+ (referenced) assets are packaged as models, therefore it is convenient
+ to provide access to the one from the other.
+
+ \\todo establish an _IsCompatible() override that returns IsModel()
+ \\todo GetModelInstanceName()
+ """
+)
+{
+}
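UsdModelAPI is non-applied, so it is simply constructed on a prim to author or query kind and assetInfo. A minimal sketch, assuming the standard `pxr` modules; the prim path and asset name are illustrative.

```
# Sketch of the non-applied UsdModelAPI usage described above.
# Assumes the standard pxr modules; names are illustrative.
from pxr import Usd, Kind

stage = Usd.Stage.CreateInMemory()
prim = stage.DefinePrim("/Asset", "Xform")

model = Usd.ModelAPI(prim)            # non-applied: no Apply() step
model.SetKind(Kind.Tokens.component)  # kind metadata
model.SetAssetName("Asset")           # assetInfo['name']

print(model.GetKind())                # 'component'
print(prim.GetAssetInfo())            # {'name': 'Asset'}
```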
+
+class "CollectionAPI" (
+ doc = """ This is a general purpose API schema, used to describe a
+ collection of heterogeneous objects within the scene. \"Objects\" here may be
+ prims or properties belonging to prims or other collections. It's an add-on
+ schema that can be applied many times to a prim with different collection
+ names.
+
+ A collection allows an enumeration of a set of paths to include and a
+ set of paths to exclude. Whether the descendants of an included
+ path are members of a collection are decided by its expansion rule
+ (see below). If the collection excludes paths that are not descendents
+ of included paths, the collection implicitly includes the root path
+ </>. If such a collection also includes paths that are not
+ descendants of the excluded paths, it is considered invalid, since
+ the intention is ambiguous.
+
+ All the properties authored by the schema are namespaced under
+ \"collection:\". The given name of the collection provides additional
+ namespacing for the various per-collection properties, which include the
+ following:
+
+
+ uniform token collection:collectionName:expansionRule -
+ specified how the paths that are included in the collection must be expanded
+ to determine its members. Possible values include:
+
+
+ explicitOnly - only paths in the includes rel targets and not
+ in the excludes rel targets belong to the collection.
+
+
+ expandPrims - all the prims at or below the includes rel-
+ targets (and not under the excludes rel-targets) belong to the
+ collection. Any property paths included in the collection would, of
+ course, also be honored. This is the default behavior as it satisfies
+ most use cases.
+
+
+ expandPrimsAndProperties - like expandPrims, but also
+ includes all properties on all matched prims. We're still not quite
+ sure what the use cases are for this, but you can use it to capture a
+ whole lot of UsdObjects very concisely.
+
+
+
+
+ bool collection:collectionName:includeRoot - boolean
+ attribute indicating whether the pseudo-root path </> should
+ be counted as one of the included target paths. The fallback is false.
+ This separate attribute is required because relationships cannot
+ directly target the root. When expansionRule is explicitOnly, this
+ attribute is ignored.
+
+ rel collection:collectionName:includes - specifies a list
+ of targets that are included in the collection. This can target prims or
+ properties directly. A collection can insert the rules of another
+ collection by making its includes relationship target the
+ collection:{collectionName} property on the owning prim of the
+ collection to be included (see UsdCollectionAPI::GetCollectionAttr).
+ It is important to note that including another collection does not
+ guarantee the contents of that collection will be in the final collection;
+ instead, the rules are merged. This means, for example, an exclude
+ entry may exclude a portion of the included collection.
+ When a collection includes one or more collections, the order in which
+ targets are added to the includes relationship may become significant, if
+ there are conflicting opinions about the same path. Targets that are added
+ later are considered to be stronger than earlier targets for the same path.
+
+
+ rel collection:collectionName:excludes - specifies a list
+ of targets that are excluded below the included paths in this
+ collection. This can target prims or properties directly, but cannot
+ target another collection. This is to keep the membership determining
+ logic simple, efficient and easier to reason about. Finally, it is invalid
+ for a collection to exclude paths that are not included in it. The presence
+ of such \"orphaned\" excluded paths will not affect the set of paths included
+ in the collection, but may affect the performance of querying membership of
+ a path in the collection (see UsdCollectionAPI::MembershipQuery::IsPathIncluded)
+ or of enumerating the objects belonging to the collection (see
+ UsdCollectionAPI::GetIncludedObjects).
+
+
+ uniform opaque collection:collectionName - opaque
+ attribute (meaning it can never have a value) that represents the collection
+ for the purpose of allowing another collection to include it. When this
+ property is targeted by another collection's includes relationship,
+ the rules of this collection will be inserted into the rules of the collection
+ that includes it.
+
+
+ Implicit inclusion
+
+ In some scenarios it is useful to express a collection that includes
+ everything except certain paths. To support this, a collection
+ that has an exclude that is not a descendent of any include
+ will include the root path </>.
+
+ Creating collections in C++
+
+ \\snippet examples_usd.cpp ApplyCollections
+ """
+)
+{
+ uniform opaque collection:__INSTANCE_NAME__ (
+ doc = """This property represents the collection for the purpose of
+ allowing another collection to include it. When this property is
+ targeted by another collection's includes relationship, the rules
+ of this collection will be inserted into the rules of the collection
+ that includes it.
+ """
+ )
+ rel collection:__INSTANCE_NAME__:excludes (
+ doc = '''Specifies a list of targets that are excluded below
+ the included paths in this collection. This can target prims or
+ properties directly, but cannot target another collection. This is to
+ keep the membership determining logic simple, efficient and easier to
+ reason about. Finally, it is invalid for a collection to exclude
+ paths that are not included in it. The presence of such "orphaned"
+ excluded paths will not affect the set of paths included in the
+ collection, but may affect the performance of querying membership of
+ a path in the collection (see
+ UsdCollectionAPI::MembershipQuery::IsPathIncluded)
+ or of enumerating the objects belonging to the collection (see
+ UsdCollectionAPI::GetIncludedObjects).'''
+ )
+ uniform token collection:__INSTANCE_NAME__:expansionRule = "expandPrims" (
+ allowedTokens = ["explicitOnly", "expandPrims", "expandPrimsAndProperties"]
+ doc = """Specifies how the paths that are included in
+ the collection must be expanded to determine its members."""
+ )
+ uniform bool collection:__INSTANCE_NAME__:includeRoot (
+ doc = """Boolean attribute indicating whether the pseudo-root
+ path </> should be counted as one of the included target
+ paths. The fallback is false. This separate attribute is
+ required because relationships cannot directly target the root."""
+ )
+ rel collection:__INSTANCE_NAME__:includes (
+ doc = """Specifies a list of targets that are included in the collection.
+ This can target prims or properties directly. A collection can insert
+ the rules of another collection by making its includes
+ relationship target the collection:{collectionName} property on
+ the owning prim of the collection to be included"""
+ )
+ uniform pathExpression collection:__INSTANCE_NAME__:membershipExpression (
+ doc = """Specifies a path expression that determines membership in this
+ collection."""
+ )
+}
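The per-collection properties above are normally authored through the generated UsdCollectionAPI rather than by hand. Below is a minimal sketch of authoring and querying a collection, assuming the standard `pxr` modules; the prim paths and collection name are illustrative.

```
# Sketch of authoring and querying a collection as described above.
# Assumes the standard pxr modules; paths and collection name are illustrative.
from pxr import Usd

stage = Usd.Stage.CreateInMemory()
root = stage.DefinePrim("/Root", "Xform")
stage.DefinePrim("/Root/GeomA", "Sphere")
stage.DefinePrim("/Root/GeomB", "Sphere")

# Multiple-apply schema: apply once per collection name.
collection = Usd.CollectionAPI.Apply(root, "background")
collection.CreateExpansionRuleAttr(Usd.Tokens.expandPrims)
collection.CreateIncludesRel().AddTarget("/Root")
collection.CreateExcludesRel().AddTarget("/Root/GeomB")

# Membership is evaluated through a computed membership query.
query = collection.ComputeMembershipQuery()
print(query.IsPathIncluded("/Root/GeomA"))   # True
print(query.IsPathIncluded("/Root/GeomB"))   # False
```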
+
+class "ClipsAPI" (
+ doc = """ UsdClipsAPI is an API schema that provides an interface to
+ a prim's clip metadata. Clips are a \"value resolution\" feature that
+ allows one to specify a sequence of usd files (clips) to be consulted,
+ over time, as a source of varying overrides for the prims at and
+ beneath this prim in namespace.
+
+ SetClipAssetPaths() establishes the set of clips that can be consulted.
+ SetClipActive() specifies the ordering of clip application over time
+ (clips can be repeated), while SetClipTimes() specifies time-mapping
+ from stage-time to clip-time for the clip active at a given stage-time,
+ which allows for time-dilation and repetition of clips.
+ Finally, SetClipPrimPath() determines the path within each clip that will
+ map to this prim, i.e. the location within the clip at which we will look
+ for opinions for this prim.
+
+ The clip asset paths, times and active metadata can also be specified
+ through template clip metadata. This can be desirable when your set of
+ assets is very large, as the template metadata is much more concise.
+ SetClipTemplateAssetPath() establishes the asset identifier pattern of the
+ set of clips to be consulted. SetClipTemplateStride(),
+ SetClipTemplateEndTime(), and SetClipTemplateStartTime() specify the range
+ in which USD will search, based on the template. From the set of resolved
+ asset paths, times and active will be derived internally.
+
+ A prim may have multiple \"clip sets\" -- named sets of clips that each
+ have their own values for the metadata described above. For example,
+ a prim might have a clip set named \"Clips_1\" that specifies some group
+ of clip asset paths, and another clip set named \"Clips_2\" that uses
+ an entirely different set of clip asset paths. These clip sets are
+ composed across composition arcs, so clip sets for a prim may be
+ defined in multiple sublayers or references, for example. Individual
+ metadata for a given clip set may be sparsely overridden.
+
+ Important facts about clips:
+ - Within the layerstack in which clips are established, the
+ opinions within the clips will be weaker than any local opinions
+ in the layerstack, but stronger than varying opinions coming across
+ references and variants.
+ - We will never look for metadata or default opinions in clips
+ when performing value resolution on the owning stage, since these
+ quantities must be time-invariant.
+
+ This leads to the common structure in which we reference a model asset
+ on a prim, and then author clips at the same site: the asset reference
+ will provide the topology and unvarying data for the model, while the
+ clips will provide the time-sampled animation.
+
+ For further information, see \\ref Usd_Page_ValueClips
+ """
+)
+{
+}
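UsdClipsAPI is likewise non-applied; the clip metadata described above is authored through its Set* methods. A minimal sketch, assuming the standard `pxr` modules; the clip file names and time mappings are illustrative.

```
# Sketch of authoring value-clip metadata as described above.
# Assumes the standard pxr modules; clip paths and mappings are illustrative.
from pxr import Usd, Sdf

stage = Usd.Stage.CreateInMemory()
prim = stage.DefinePrim("/Model", "Xform")

clips = Usd.ClipsAPI(prim)                      # non-applied API schema
clips.SetClipAssetPaths([Sdf.AssetPath("./clip.01.usd"),
                         Sdf.AssetPath("./clip.02.usd")])
clips.SetClipPrimPath("/Model")                 # path consulted inside each clip
clips.SetClipActive([(0.0, 0), (10.0, 1)])      # stage time -> clip index
clips.SetClipTimes([(0.0, 0.0), (10.0, 0.0)])   # stage time -> clip time

print(clips.GetClipAssetPaths())
```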
+
+# Portions of this file auto-generated by usdGenSchema.
+# Edits will survive regeneration except for comments and
+# changes to types with autoGenerated=true.
+{
+ "Plugins": [
+ {
+ "Info": {
+ "SdfMetadata": {
+ "apiSchemas": {
+ "appliesTo": "prims",
+ "type": "tokenlistop"
+ },
+ "clipSets": {
+ "appliesTo": [
+ "prims"
+ ],
+ "type": "stringlistop"
+ },
+ "clips": {
+ "appliesTo": [
+ "prims"
+ ],
+ "type": "dictionary"
+ },
+ "fallbackPrimTypes": {
+ "appliesTo": [
+ "layers"
+ ],
+ "type": "dictionary"
+ }
+ },
+ "Types": {
+ "UsdAPISchemaBase": {
+ "alias": {
+ "UsdSchemaBase": "APISchemaBase"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdSchemaBase"
+ ],
+ "schemaKind": "abstractBase"
+ },
+ "UsdClipsAPI": {
+ "alias": {
+ "UsdSchemaBase": "ClipsAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "nonAppliedAPI"
+ },
+ "UsdCollectionAPI": {
+ "alias": {
+ "UsdSchemaBase": "CollectionAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "multipleApplyAPI"
+ },
+ "UsdModelAPI": {
+ "alias": {
+ "UsdSchemaBase": "ModelAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "nonAppliedAPI"
+ },
+ "UsdSchemaBase": {
+ "alias": {
+ "UsdSchemaBase": "SchemaBase"
+ },
+ "schemaKind": "abstractBase"
+ },
+ "UsdTyped": {
+ "alias": {
+ "UsdSchemaBase": "Typed"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdSchemaBase"
+ ],
+ "schemaKind": "abstractBase"
+ },
+ "UsdUsdFileFormat": {
+ "bases": [
+ "SdfFileFormat"
+ ],
+ "displayName": "USD File Format",
+ "extensions": [
+ "usd"
+ ],
+ "formatId": "usd",
+ "primary": true,
+ "target": "usd"
+ },
+ "UsdUsdaFileFormat": {
+ "bases": [
+ "SdfTextFileFormat"
+ ],
+ "displayName": "USD Text File Format",
+ "extensions": [
+ "usda"
+ ],
+ "formatId": "usda",
+ "primary": true,
+ "target": "usd"
+ },
+ "UsdUsdcFileFormat": {
+ "bases": [
+ "SdfFileFormat"
+ ],
+ "displayName": "USD Crate File Format",
+ "extensions": [
+ "usdc"
+ ],
+ "formatId": "usdc",
+ "primary": true,
+ "target": "usd"
+ },
+ "UsdUsdzFileFormat": {
+ "bases": [
+ "SdfFileFormat"
+ ],
+ "displayName": "USDZ File Format",
+ "extensions": [
+ "usdz"
+ ],
+ "formatId": "usdz",
+ "primary": true,
+ "supportsEditing": false,
+ "supportsWriting": false,
+ "target": "usd"
+ },
+ "Usd_UsdzResolver": {
+ "bases": [
+ "ArPackageResolver"
+ ],
+ "extensions": [
+ "usdz"
+ ]
+ }
+ }
+ },
+ "LibraryPath": "",
+ "Name": "usd",
+ "ResourcePath": "resources",
+ "Root": "..",
+ "Type": "library"
+ }
+ ]
+}
+#usda 1.0
+(
+ "This file describes the USD primitive schemata and drives code generation."
+)
+
+def "GLOBAL" (
+ customData = {
+ string libraryName = "usd"
+ string libraryPath = "pxr/usd/usd"
+ # string libraryPrefix = "Usd"
+ # string tokensPrefix = "Usd"
+ dictionary libraryTokens = {
+ dictionary apiSchemas = {
+ string doc = """
+ A listop metadata containing the API schemas which have
+ been applied to this prim, using the Apply() method on
+ the particular schema class.
+ """
+ }
+ dictionary fallbackPrimTypes = {
+ string doc = """
+ A dictionary metadata that maps the name of a concrete schema
+ prim type to an ordered list of schema prim types to use instead
+ if the schema prim type doesn't exist in version of USD being
+ used.
+ """
+ }
+ }
+ }
+)
+{
+}
+
+class "Typed"
+(
+ doc = """The base class for all \\em typed schemas (those that can impart a
+ typeName to a UsdPrim), and therefore the base class for all
+ concrete, instantiable "IsA" schemas.
+
+ UsdTyped implements a typeName-based query for its override of
+ UsdSchemaBase::_IsCompatible(). It provides no other behavior."""
+)
+{
+}
+
+class "APISchemaBase"
+(
+ doc = """The base class for all \\em API schemas.
+
+ An API schema provides an interface to a prim's qualities, but does not
+ specify a typeName for the underlying prim. The prim's qualities include
+ its inheritance structure, attributes, relationships etc. Since it cannot
+ provide a typeName, an API schema is considered to be non-concrete.
+
+ To auto-generate an API schema using usdGenSchema, simply leave the
+ typeName empty and make it inherit from "/APISchemaBase" or from another
+ API schema. See UsdModelAPI, UsdClipsAPI and UsdCollectionAPI for examples.
+
+ API schemas are classified into applied and non-applied API schemas.
+ The author of an API schema has to decide on the type of API schema
+ at the time of its creation by setting customData['apiSchemaType'] in the
+ schema definition (i.e. in the associated primSpec inside the schema.usda
+ file). UsdAPISchemaBase implements methods that are used to record the
+ application of an API schema on a USD prim.
+
+ If an API schema only provides an interface to set certain core bits of
+ metadata (like UsdModelAPI, which sets model kind and UsdClipsAPI, which
+ sets clips-related metadata) OR if the API schema can apply to any type of
+ prim or only to a known fixed set of prim types OR if there is no use of
+ recording the application of the API schema, in such cases, it would be
+ better to make it a non-applied API schema. Examples of non-applied API
+ schemas include UsdModelAPI, UsdClipsAPI, UsdShadeConnectableAPI and
+ UsdGeomPrimvarsAPI.
+
+ If there is a need to discover (or record) whether a prim contains or
+ subscribes to a given API schema, it would be advantageous to make the API
+ schema be "applied". In general, API schemas that add one or more properties
+ to a prim should be tagged as applied API schemas. A public Apply() method
+ is generated for applied API schemas by usdGenSchema. An applied API schema
+ must be applied to a prim via a call to the generated Apply() method, for
+ the schema object to evaluate to true when converted to a bool using the
+ explicit bool conversion operator. Examples of applied API schemas include
+ UsdCollectionAPI, UsdGeomModelAPI and UsdGeomMotionAPI
+
+ \\anchor UsdAPISchemaBase_SingleVsMultipleApply
+ \\name Single vs. Multiple Apply API Schemas
+
+ Applied API schemas can further be classified into single-apply and
+ multiple-apply API schemas. As the name suggests, a single-apply API schema
+ can only be applied once to a prim. A multiple-apply API schema can be
+ applied multiple times with different 'instanceName' values. An example of
+ a multiple-apply API schema is UsdCollectionAPI, where the API schema is
+ applied to a prim once for every collection owned by the prim.
+
+ \\note An applied API schema can only inherit from another applied API
+ schema or directly from APISchemaBase. Similarly, a non-applied API schema
+ can only inherit from a non-applied API Schema or directly from
+ APISchemaBase. 'usdGenSchema' attempts to issue a warning if it detects
+ an incompatibility.
+
+ \\note A multiple-apply API schema may not inherit from a single-apply API
+ schema and vice versa.
+
+ \\note When the bool-conversion operator is invoked on an applied API
+ schema, it evaluates to true only if the application of the API schema has
+ been recorded on the prim via a call to the auto-generated Apply() method.
+
+ """
+ customData = {
+ string fileName = "apiSchemaBase"
+ }
+)
+{
+}
+
+class "ModelAPI"
+(
+ inherits = </APISchemaBase>
+ doc = """UsdModelAPI is an API schema that provides an interface to a prim's
+ model qualities, if it does, in fact, represent the root prim of a model.
+
+ The first and foremost model quality is its \\em kind, i.e. the metadata
+ that establishes it as a model (See KindRegistry). UsdModelAPI provides
+ various methods for setting and querying the prim's kind, as well as
+ queries (also available on UsdPrim) for asking what category of model
+ the prim is. See \\ref Usd_ModelKind "Kind and Model-ness".
+
+ UsdModelAPI also provides access to a prim's \\ref Usd_Model_AssetInfo "assetInfo"
+ data. While any prim \\em can host assetInfo, it is common that published
+ (referenced) assets are packaged as models, therefore it is convenient
+ to provide access to the one from the other.
+
+ \\todo establish an _IsCompatible() override that returns IsModel()
+ \\todo GetModelInstanceName()
+ """
+ customData = {
+ string apiSchemaType = "nonApplied"
+ }
+)
+{
+}
+
+class "CollectionAPI"
+(
+ inherits = </APISchemaBase>
+ doc = """ This is a general purpose API schema, used to describe a
+ collection of heterogeneous objects within the scene. "Objects" here may be
+ prims or properties belonging to prims or other collections. It's an add-on
+ schema that can be applied many times to a prim with different collection
+ names.
+
+ A collection allows an enumeration of a set of paths to include and a
+ set of paths to exclude. Whether the descendants of an included
+ path are members of a collection are decided by its expansion rule
+ (see below). If the collection excludes paths that are not descendents
+ of included paths, the collection implicitly includes the root path
+ </>. If such a collection also includes paths that are not
+ descendants of the excluded paths, it is considered invalid, since
+ the intention is ambiguous.
+
+ All the properties authored by the schema are namespaced under
+ "collection:". The given name of the collection provides additional
+ namespacing for the various per-collection properties, which include the
+ following:
+
+
+ uniform token collection:collectionName:expansionRule -
+ specified how the paths that are included in the collection must be expanded
+ to determine its members. Possible values include:
+
+
+ explicitOnly - only paths in the includes rel targets and not
+ in the excludes rel targets belong to the collection.
+
+
+ expandPrims - all the prims at or below the includes rel-
+ targets (and not under the excludes rel-targets) belong to the
+ collection. Any property paths included in the collection would, of
+ course, also be honored. This is the default behavior as it satisfies
+ most use cases.
+
+
+ expandPrimsAndProperties - like expandPrims, but also
+ includes all properties on all matched prims. We're still not quite
+ sure what the use cases are for this, but you can use it to capture a
+ whole lot of UsdObjects very concisely.
+
+
+
+
+ bool collection:collectionName:includeRoot - boolean
+ attribute indicating whether the pseudo-root path </> should
+ be counted as one of the included target paths. The fallback is false.
+ This separate attribute is required because relationships cannot
+ directly target the root. When expansionRule is explicitOnly, this
+ attribute is ignored.
+
+ rel collection:collectionName:includes - specifies a list
+ of targets that are included in the collection. This can target prims or
+ properties directly. A collection can insert the rules of another
+ collection by making its includes relationship target the
+ collection:{collectionName} property on the owning prim of the
+ collection to be included (see UsdCollectionAPI::GetCollectionAttr).
+ It is important to note that including another collection does not
+ guarantee the contents of that collection will be in the final collection;
+ instead, the rules are merged. This means, for example, an exclude
+ entry may exclude a portion of the included collection.
+ When a collection includes one or more collections, the order in which
+ targets are added to the includes relationship may become significant, if
+ there are conflicting opinions about the same path. Targets that are added
+ later are considered to be stronger than earlier targets for the same path.
+
+
+ rel collection:collectionName:excludes - specifies a list
+ of targets that are excluded below the included paths in this
+ collection. This can target prims or properties directly, but cannot
+ target another collection. This is to keep the membership determining
+ logic simple, efficient and easier to reason about. Finally, it is invalid
+ for a collection to exclude paths that are not included in it. The presence
+ of such "orphaned" excluded paths will not affect the set of paths included
+ in the collection, but may affect the performance of querying membership of
+ a path in the collection (see UsdCollectionAPI::MembershipQuery::IsPathIncluded)
+ or of enumerating the objects belonging to the collection (see
+ UsdCollectionAPI::GetIncludedObjects).
+
+
+ uniform opaque collection:collectionName - opaque
+ attribute (meaning it can never have a value) that represents the collection
+ for the purpose of allowing another collection to include it. When this
+ property is targeted by another collection's includes relationship,
+ the rules of this collection will be inserted into the rules of the collection
+ that includes it.
+
+
+ Implicit inclusion
+
+ In some scenarios it is useful to express a collection that includes
+ everything except certain paths. To support this, a collection
+ that has an exclude that is not a descendent of any include
+ will include the root path </>.
+
+ Creating collections in C++
+
+ \\snippet examples_usd.cpp ApplyCollections
+ """
+
+ customData = {
+ string extraIncludes = """
+#include "pxr/usd/usd/collectionMembershipQuery.h"
+#include "pxr/usd/usd/primFlags.h"
+#include "pxr/usd/usd/tokens.h"
+#include "pxr/usd/sdf/pathExpression.h"
+"""
+ token apiSchemaType = "multipleApply"
+ token propertyNamespacePrefix = "collection"
+ dictionary schemaTokens = {
+ dictionary exclude = {
+ string doc = """
+ This is the token used to exclude a path from a collection.
+ Although it is not a possible value for the "expansionRule"
+ attribute, it is used as the expansionRule for excluded paths
+ in UsdCollectionAPI::MembershipQuery::IsPathIncluded.
+ """
+ }
+ }
+ }
+)
+{
+ uniform token expansionRule = "expandPrims" (
+ allowedTokens = ["explicitOnly", "expandPrims", "expandPrimsAndProperties"]
+ doc = """Specifies how the paths that are included in
+ the collection must be expanded to determine its members."""
+ )
+ uniform bool includeRoot (
+ doc = """Boolean attribute indicating whether the pseudo-root
+ path </> should be counted as one of the included target
+ paths. The fallback is false. This separate attribute is
+ required because relationships cannot directly target the root."""
+ )
+ rel includes (
+ doc = """Specifies a list of targets that are included in the collection.
+ This can target prims or properties directly. A collection can insert
+ the rules of another collection by making its includes
+ relationship target the collection:{collectionName} property on
+ the owning prim of the collection to be included"""
+ )
+ rel excludes (
+ doc = """Specifies a list of targets that are excluded below
+ the included paths in this collection. This can target prims or
+ properties directly, but cannot target another collection. This is to
+ keep the membership determining logic simple, efficient and easier to
+ reason about. Finally, it is invalid for a collection to exclude
+ paths that are not included in it. The presence of such "orphaned"
+ excluded paths will not affect the set of paths included in the
+ collection, but may affect the performance of querying membership of
+ a path in the collection (see
+ UsdCollectionAPI::MembershipQuery::IsPathIncluded)
+ or of enumerating the objects belonging to the collection (see
+ UsdCollectionAPI::GetIncludedObjects)."""
+ )
+ uniform pathExpression membershipExpression (
+ doc = """Specifies a path expression that determines membership in this
+ collection."""
+ )
+ uniform opaque __INSTANCE_NAME__ (
+ customData = {
+ string apiName = "Collection"
+ }
+ doc = """This property represents the collection for the purpose of
+ allowing another collection to include it. When this property is
+ targeted by another collection's includes relationship, the rules
+ of this collection will be inserted into the rules of the collection
+ that includes it.
+ """
+ )
+}
+
+class "ClipsAPI"
+(
+ inherits = </APISchemaBase>
+ doc = """ UsdClipsAPI is an API schema that provides an interface to
+ a prim's clip metadata. Clips are a "value resolution" feature that
+ allows one to specify a sequence of usd files (clips) to be consulted,
+ over time, as a source of varying overrides for the prims at and
+ beneath this prim in namespace.
+
+ SetClipAssetPaths() establishes the set of clips that can be consulted.
+ SetClipActive() specifies the ordering of clip application over time
+ (clips can be repeated), while SetClipTimes() specifies time-mapping
+ from stage-time to clip-time for the clip active at a given stage-time,
+ which allows for time-dilation and repetition of clips.
+ Finally, SetClipPrimPath() determines the path within each clip that will
+ map to this prim, i.e. the location within the clip at which we will look
+ for opinions for this prim.
+
+ The clip asset paths, times and active metadata can also be specified
+ through template clip metadata. This can be desirable when your set of
+ assets is very large, as the template metadata is much more concise.
+ SetClipTemplateAssetPath() establishes the asset identifier pattern of the
+ set of clips to be consulted. SetClipTemplateStride(),
+ SetClipTemplateEndTime(), and SetClipTemplateStartTime() specify the range
+ in which USD will search, based on the template. From the set of resolved
+ asset paths, times and active will be derived internally.
+
+ A prim may have multiple "clip sets" -- named sets of clips that each
+ have their own values for the metadata described above. For example,
+ a prim might have a clip set named "Clips_1" that specifies some group
+ of clip asset paths, and another clip set named "Clips_2" that uses
+ an entirely different set of clip asset paths. These clip sets are
+ composed across composition arcs, so clip sets for a prim may be
+ defined in multiple sublayers or references, for example. Individual
+ metadata for a given clip set may be sparsely overridden.
+
+ Important facts about clips:
+ \\li Within the layerstack in which clips are established, the
+ opinions within the clips will be \\em weaker than any local opinions
+ in the layerstack, but \\em stronger than varying opinions coming across
+ references and variants.
+ \\li We will never look for metadata or default opinions in clips
+ when performing value resolution on the owning stage, since these
+ quantities must be time-invariant.
+
+ This leads to the common structure in which we reference a model asset
+ on a prim, and then author clips at the same site: the asset reference
+ will provide the topology and unvarying data for the model, while the
+ clips will provide the time-sampled animation.
+
+ For further information, see \\ref Usd_Page_ValueClips
+ """
+ customData = {
+ token apiSchemaType = "nonApplied"
+ dictionary schemaTokens = {
+ dictionary clips = {
+ string doc = """
+ Dictionary that contains the definition of the clip sets on
+ this prim. See \\ref UsdClipsAPI::GetClips.
+ """
+ }
+
+ dictionary clipSets = {
+ string doc = """
+ ListOp that may be used to affect how opinions from
+ clip sets are applied during value resolution.
+ See \\ref UsdClipsAPI::GetClipSets.
+ """
+ }
+ }
+ }
+)
+{
+}
+#usda 1.0
+(
+ "WARNING: THIS FILE IS GENERATED BY usdGenSchema. DO NOT EDIT."
+)
+
+class "Imageable" (
+ doc = """Base class for all prims that may require rendering or
+ visualization of some sort. The primary attributes of Imageable
+ are visibility and purpose, which each provide instructions for
+ what geometry should be included for processing by rendering and other
+ computations.
+
+ \\deprecated Imageable also provides API for accessing primvars, which
+ has been moved to the UsdGeomPrimvarsAPI schema, because primvars can now
+ be applied on non-Imageable prim types. This API is planned
+ to be removed, UsdGeomPrimvarsAPI should be used directly instead."""
+)
+{
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+}
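The visibility and purpose attributes above are typically authored through UsdGeomImageable. A minimal sketch, assuming the standard `pxr` modules; the prim path and time code are illustrative.

```
# Sketch of authoring the visibility/purpose attributes described above.
# Assumes the standard pxr modules; path and time values are illustrative.
from pxr import Usd, UsdGeom

stage = Usd.Stage.CreateInMemory()
sphere = UsdGeom.Sphere.Define(stage, "/World/Proxy")

imageable = UsdGeom.Imageable(sphere.GetPrim())
imageable.CreatePurposeAttr(UsdGeom.Tokens.proxy)
imageable.CreateVisibilityAttr(UsdGeom.Tokens.inherited)

# Visibility is animatable, so it can vary over time.
imageable.GetVisibilityAttr().Set(UsdGeom.Tokens.invisible, Usd.TimeCode(5.0))
print(imageable.ComputeVisibility(Usd.TimeCode(5.0)))    # 'invisible'
```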
+
+class "VisibilityAPI" (
+ doc = '''
+ UsdGeomVisibilityAPI introduces properties that can be used to author
+ visibility opinions.
+
+ \\note
+ Currently, this schema only introduces the attributes that are used to
+ control purpose visibility. Later, this schema will define _all_
+ visibility-related properties and UsdGeomImageable will no longer define
+ those properties.
+
+ The purpose visibility attributes added by this schema,
+ _guideVisibility_, _proxyVisibility_, and _renderVisibility_ can each be
+ used to control visibility for geometry of the corresponding purpose
+ values, with the overall _visibility_ attribute acting as an
+ override. I.e., if _visibility_ evaluates to "invisible", purpose
+ visibility is invisible; otherwise, purpose visibility is determined by
+ the corresponding purpose visibility attribute.
+
+ Note that the behavior of _guideVisibility_ is subtly different from the
+ _proxyVisibility_ and _renderVisibility_ attributes, in that "guide"
+ purpose visibility always evaluates to either "invisible" or "visible",
+ whereas the other attributes may yield computed values of "inherited" if
+ there is no authored opinion on the attribute or inherited from an
+ ancestor. This is motivated by the fact that, in Pixar's user workflows,
+ we have never found a need to have all guides visible in a scene by
+ default, whereas we do find that flexibility useful for "proxy" and
+ "render" geometry.
+
+ This schema can only be applied to UsdGeomImageable prims. The
+ UseGeomImageable schema provides API for computing the purpose visibility
+ UsdGeomImageable schema provides API for computing the purpose visibility
+ '''
+)
+{
+ uniform token guideVisibility = "invisible" (
+ allowedTokens = ["inherited", "invisible", "visible"]
+ doc = '''
+ This attribute controls visibility for geometry with purpose "guide".
+
+ Unlike overall _visibility_, _guideVisibility_ is uniform, and
+ therefore cannot be animated.
+
+ Also unlike overall _visibility_, _guideVisibility_ is tri-state, in
+ that a descendant with an opinion of "visible" overrides an ancestor
+ opinion of "invisible".
+
+ The _guideVisibility_ attribute works in concert with the overall
+ _visibility_ attribute: The visibility of a prim with purpose "guide"
+ is determined by the inherited values it receives for the _visibility_
+ and _guideVisibility_ attributes. If _visibility_ evaluates to
+ "invisible", the prim is invisible. If _visibility_ evaluates to
+ "inherited" and _guideVisibility_ evaluates to "visible", then the
+ prim is visible. __Otherwise, it is invisible.__
+ '''
+ )
+ uniform token proxyVisibility = "inherited" (
+ allowedTokens = ["inherited", "invisible", "visible"]
+ doc = '''
+ This attribute controls visibility for geometry with purpose "proxy".
+
+ Unlike overall _visibility_, _proxyVisibility_ is uniform, and
+ therefore cannot be animated.
+
+ Also unlike overall _visibility_, _proxyVisibility_ is tri-state, in
+ that a descendant with an opinion of "visible" overrides an ancestor
+ opinion of "invisible".
+
+ The _proxyVisibility_ attribute works in concert with the overall
+ _visibility_ attribute: The visibility of a prim with purpose "proxy"
+ is determined by the inherited values it receives for the _visibility_
+ and _proxyVisibility_ attributes. If _visibility_ evaluates to
+ "invisible", the prim is invisible. If _visibility_ evaluates to
+ "inherited" then: If _proxyVisibility_ evaluates to "visible", then
+ the prim is visible; if _proxyVisibility_ evaluates to "invisible",
+ then the prim is invisible; if _proxyVisibility_ evaluates to
+ "inherited", then the prim may either be visible or invisible,
+ depending on a fallback value determined by the calling context.
+ '''
+ )
+ uniform token renderVisibility = "inherited" (
+ allowedTokens = ["inherited", "invisible", "visible"]
+ doc = '''
+ This attribute controls visibility for geometry with purpose
+ "render".
+
+ Unlike overall _visibility_, _renderVisibility_ is uniform, and
+ therefore cannot be animated.
+
+ Also unlike overall _visibility_, _renderVisibility_ is tri-state, in
+ that a descendant with an opinion of "visible" overrides an ancestor
+ opinion of "invisible".
+
+ The _renderVisibility_ attribute works in concert with the overall
+ _visibility_ attribute: The visibility of a prim with purpose "render"
+ is determined by the inherited values it receives for the _visibility_
+ and _renderVisibility_ attributes. If _visibility_ evaluates to
+ "invisible", the prim is invisible. If _visibility_ evaluates to
+ "inherited" then: If _renderVisibility_ evaluates to "visible", then
+ the prim is visible; if _renderVisibility_ evaluates to "invisible",
+ then the prim is invisible; if _renderVisibility_ evaluates to
+ "inherited", then the prim may either be visible or invisible,
+ depending on a fallback value determined by the calling context.
+ '''
+ )
+}
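A minimal sketch of the purpose-visibility workflow described above, assuming a recent `pxr` build in which UsdGeomVisibilityAPI and UsdGeomImageable::ComputeEffectiveVisibility are available; the prim layout is illustrative.

```
# Sketch of per-purpose visibility as described above.
# Assumes a recent pxr build with UsdGeomVisibilityAPI; paths are illustrative.
from pxr import Usd, UsdGeom

stage = Usd.Stage.CreateInMemory()
gizmo = UsdGeom.Sphere.Define(stage, "/World/Gizmo")
gizmo.CreatePurposeAttr(UsdGeom.Tokens.guide)

vis = UsdGeom.VisibilityAPI.Apply(gizmo.GetPrim())
vis.CreateGuideVisibilityAttr(UsdGeom.Tokens.visible)   # guides default to invisible

# Purpose visibility is resolved through the Imageable computation.
imageable = UsdGeom.Imageable(gizmo.GetPrim())
print(imageable.ComputeEffectiveVisibility(UsdGeom.Tokens.guide))
```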
+
+class "PrimvarsAPI" (
+ doc = """UsdGeomPrimvarsAPI encodes geometric \"primitive variables\",
+ as UsdGeomPrimvar, which interpolate across a primitive's topology,
+ can override shader inputs, and inherit down namespace.
+
+ Which Method to Use to Retrieve Primvars
+
+ While creating primvars is unambiguous (CreatePrimvar()), there are quite
+ a few methods available for retrieving primvars, making it potentially
+ confusing knowing which one to use. Here are some guidelines:
+
+ - If you are populating a GUI with the primvars already available for
+ authoring values on a prim, use GetPrimvars().
+ - If you want all of the \"useful\" (e.g. to a renderer) primvars
+ available at a prim, including those inherited from ancestor prims, use
+ FindPrimvarsWithInheritance(). Note that doing so individually for many
+ prims will be inefficient.
+ - To find a particular primvar defined directly on a prim, which may
+ or may not provide a value, use GetPrimvar().
+ - To find a particular primvar defined on a prim or inherited from
+ ancestors, which may or may not provide a value, use
+ FindPrimvarWithInheritance().
+ - To *efficiently* query for primvars using the overloads of
+ FindPrimvarWithInheritance() and FindPrimvarsWithInheritance(), one
+ must first cache the results of FindIncrementallyInheritablePrimvars() for
+ each non-leaf prim on the stage. """
+)
+{
+}
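+
+The retrieval guidelines above map directly onto the Python bindings. A short
+sketch, assuming the pxr Python modules are available; the stage and prim
+paths are illustrative:
+
+```python
+from pxr import Usd, UsdGeom, Sdf
+
+stage = Usd.Stage.CreateInMemory()
+mesh = stage.DefinePrim("/World/Mesh", "Mesh")       # illustrative prim
+pvAPI = UsdGeom.PrimvarsAPI(mesh)
+
+# Creating a primvar is unambiguous.
+pv = pvAPI.CreatePrimvar("myColor", Sdf.ValueTypeNames.Color3fArray,
+                         UsdGeom.Tokens.constant)
+pv.Set([(1.0, 0.0, 0.0)])
+
+# Populating a GUI: primvars declared directly on this prim.
+local = pvAPI.GetPrimvars()
+
+# Everything a renderer might care about, including inherited primvars.
+inherited = pvAPI.FindPrimvarsWithInheritance()
+
+# A specific primvar: locally authored vs. possibly inherited.
+direct = pvAPI.GetPrimvar("myColor")
+resolved = pvAPI.FindPrimvarWithInheritance("myColor")
+```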
+
+class "Xformable" (
+ doc = """Base class for all transformable prims, which allows arbitrary
+ sequences of component affine transformations to be encoded.
+
+ \\note
+    You may find it useful to review the linear algebra and transform
+    conventions used by UsdGeom while reading this class description.
+
+ Supported Component Transformation Operations
+
+ UsdGeomXformable currently supports arbitrary sequences of the following
+ operations, each of which can be encoded in an attribute of the proper
+ shape in any supported precision:
+ - translate - 3D
+ - scale - 3D
+ - rotateX - 1D angle in degrees
+ - rotateY - 1D angle in degrees
+ - rotateZ - 1D angle in degrees
+    - rotateABC - 3D where ABC can be any combination of the six principal
+ Euler Angle sets: XYZ, XZY, YXZ, YZX, ZXY, ZYX. See
+ \"note on rotation packing order\"
+ - orient - 4D (quaternion)
+ - transform - 4x4D
+
+ Creating a Component Transformation
+
+ To add components to a UsdGeomXformable prim, simply call AddXformOp()
+ with the desired op type, as enumerated in \\ref UsdGeomXformOp::Type,
+ and the desired precision, which is one of \\ref UsdGeomXformOp::Precision.
+ Optionally, you can also provide an \"op suffix\" for the operator that
+ disambiguates it from other components of the same type on the same prim.
+ Application-specific transform schemas can use the suffixes to fill a role
+ similar to that played by AbcGeom::XformOp's \"Hint\" enums for their own
+ round-tripping logic.
+
+ We also provide specific \"Add\" API for each type, for clarity and
+ conciseness, e.g. AddTranslateOp(), AddRotateXYZOp() etc.
+
+ AddXformOp() will return a UsdGeomXformOp object, which is a schema on a
+ newly created UsdAttribute that provides convenience API for authoring
+ and computing the component transformations. The UsdGeomXformOp can then
+ be used to author any number of timesamples and default for the op.
+
+ Each successive call to AddXformOp() adds an operator that will be applied
+ \"more locally\" than the preceding operator, just as if we were pushing
+ transforms onto a transformation stack - which is precisely what should
+ happen when the operators are consumed by a reader.
+
+ \\note
+ If you can, please try to use the UsdGeomXformCommonAPI, which wraps
+ the UsdGeomXformable with an interface in which Op creation is taken
+ care of for you, and there is a much higher chance that the data you
+ author will be importable without flattening into other DCC's, as it
+ conforms to a fixed set of Scale-Rotate-Translate Ops.
+
+ \\sa \"Using the Authoring API\"
+
+ Data Encoding and Op Ordering
+
+ Because there is no \"fixed schema\" of operations, all of the attributes
+ that encode transform operations are dynamic, and are scoped in
+ the namespace \"xformOp\". The second component of an attribute's name provides
+ the type of operation, as listed above. An \"xformOp\" attribute can
+ have additional namespace components derived from the opSuffix argument
+ to the AddXformOp() suite of methods, which provides a preferred way of
+ naming the ops such that we can have multiple \"translate\" ops with unique
+ attribute names. For example, in the attribute named
+ \"xformOp:translate:maya:pivot\", \"translate\" is the type of operation and
+ \"maya:pivot\" is the suffix.
+
+ The following ordered list of attribute declarations in usda
+    defines a basic Scale-Rotate-Translate with XYZ Euler angles, wherein the
+ translation is double-precision, and the remainder of the ops are single,
+ in which we will:
+
+    - Scale by 2.0 in each dimension
+    - Rotate about the X, Y, and Z axes by 30, 60, and 90 degrees, respectively
+    - Translate by 100 units in the Y direction
+
+ \\code
+ float3 xformOp:rotateXYZ = (30, 60, 90)
+ float3 xformOp:scale = (2, 2, 2)
+ double3 xformOp:translate = (0, 100, 0)
+ uniform token[] xformOpOrder = [ \"xformOp:translate\", \"xformOp:rotateXYZ\", \"xformOp:scale\" ]
+ \\endcode
+
+ The attributes appear in the dictionary order in which USD, by default,
+ sorts them. To ensure the ops are recovered and evaluated in the correct
+ order, the schema introduces the **xformOpOrder** attribute, which
+ contains the names of the op attributes, in the precise sequence in which
+    they should be pushed onto a transform stack. **Note** that the order is
+    opposite to what you might expect, given the matrix algebra conventions
+    referenced in the note above. This also dictates the order of op creation,
+ since each call to AddXformOp() adds a new op to the end of the
+ \\b xformOpOrder array, as a new \"most-local\" operation. See
+ \"Example 2 below\" for C++ code that could
+ have produced this USD.
+
+ If it were important for the prim's rotations to be independently
+ overridable, we could equivalently (at some performance cost) encode
+ the transformation also like so:
+ \\code
+ float xformOp:rotateX = 30
+ float xformOp:rotateY = 60
+ float xformOp:rotateZ = 90
+ float3 xformOp:scale = (2, 2, 2)
+ double3 xformOp:translate = (0, 100, 0)
+ uniform token[] xformOpOrder = [ \"xformOp:translate\", \"xformOp:rotateZ\", \"xformOp:rotateY\", \"xformOp:rotateX\", \"xformOp:scale\" ]
+ \\endcode
+
+ Again, note that although we are encoding an XYZ rotation, the three
+ rotations appear in the **xformOpOrder** in the opposite order, with Z,
+    followed by Y, followed by X.
+
+ Were we to add a Maya-style scalePivot to the above example, it might
+ look like the following:
+ \\code
+ float3 xformOp:rotateXYZ = (30, 60, 90)
+ float3 xformOp:scale = (2, 2, 2)
+ double3 xformOp:translate = (0, 100, 0)
+ double3 xformOp:translate:scalePivot
+ uniform token[] xformOpOrder = [ \"xformOp:translate\", \"xformOp:rotateXYZ\", \"xformOp:translate:scalePivot\", \"xformOp:scale\" ]
+ \\endcode
+
+ Paired \"Inverted\" Ops
+
+ We have been claiming that the ordered list of ops serves as a set
+ of instructions to a transform stack, but you may have noticed in the last
+ example that there is a missing operation - the pivot for the scale op
+ needs to be applied in its inverse-form as a final (most local) op! In the
+ AbcGeom::Xform schema, we would have encoded an actual \"final\" translation
+ op whose value was authored by the exporter as the negation of the pivot's
+ value. However, doing so would be brittle in USD, given that each op can
+ be independently overridden, and the constraint that one attribute must be
+ maintained as the negation of the other in order for successful
+ re-importation of the schema cannot be expressed in USD.
+
+ Our solution leverages the **xformOpOrder** member of the schema, which,
+ in addition to ordering the ops, may also contain one of two special
+ tokens that address the paired op and \"stack resetting\" behavior.
+
+ The \"paired op\" behavior is encoded as an \"!invert!\" prefix in
+ \\b xformOpOrder, as the result of an AddXformOp(isInverseOp=True) call.
+ The \\b xformOpOrder for the last example would look like:
+ \\code
+ uniform token[] xformOpOrder = [ \"xformOp:translate\", \"xformOp:rotateXYZ\", \"xformOp:translate:scalePivot\", \"xformOp:scale\", \"!invert!xformOp:translate:scalePivot\" ]
+ \\endcode
+
+ When asked for its value via UsdGeomXformOp::GetOpTransform(), an
+ \"inverted\" Op (i.e. the \"inverted\" half of a set of paired Ops) will fetch
+ the value of its paired attribute and return its negation. This works for
+ all op types - an error will be issued if a \"transform\" type op is singular
+ and cannot be inverted. When getting the authored value of an inverted op
+ via UsdGeomXformOp::Get(), the raw, uninverted value of the associated
+ attribute is returned.
+
+ For the sake of robustness, setting a value on an inverted op is disallowed.
+ Attempting to set a value on an inverted op will result in a coding error
+ and no value being set.
+
+ Resetting the Transform Stack
+
+ The other special op/token that can appear in xformOpOrder is
+ \"!resetXformStack!\", which, appearing as the first element of
+ xformOpOrder, indicates this prim should not inherit the transformation
+    of its namespace parent. See SetResetXformStack().
+
+ Expected Behavior for \"Missing\" Ops
+
+ If an importer expects Scale-Rotate-Translate operations, but a prim
+ has only translate and rotate ops authored, the importer should assume
+ an identity scale. This allows us to optimize the data a bit, if only
+ a few components of a very rich schema (like Maya's) are authored in the
+ app.
+
+ \\anchor usdGeom_xformableExamples
+ Using the C++ API
+
+ #1. Creating a simple transform matrix encoding
+ \\snippet examples_usdGeom.cpp CreateMatrixWithDefault
+
+ #2. Creating the simple SRT from the example above
+ \\snippet examples_usdGeom.cpp CreateExampleSRT
+
+ #3. Creating a parameterized SRT with pivot using UsdGeomXformCommonAPI
+ \\snippet examples_usdGeom.cpp CreateSRTWithDefaults
+
+ #4. Creating a rotate-only pivot transform with animated
+ rotation and translation
+ \\snippet examples_usdGeom.cpp CreateAnimatedTransform
+
+"""
+)
+{
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeom documentation on imageable purpose for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
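+
+A Python sketch of authoring the Scale-Rotate-Translate example above,
+including the Maya-style scalePivot and its paired inverted op. It is not the
+snippet referenced by the C++ examples, and the stage/prim paths are
+illustrative:
+
+```python
+from pxr import Usd, UsdGeom, Gf
+
+stage = Usd.Stage.CreateInMemory()
+xf = UsdGeom.Xform.Define(stage, "/World/Srt")        # illustrative prim
+
+# Ops are added outermost-first; each Add*Op() appends a new, "more local"
+# entry to xformOpOrder.
+xf.AddTranslateOp().Set(Gf.Vec3d(0, 100, 0))          # double precision
+xf.AddRotateXYZOp().Set(Gf.Vec3f(30, 60, 90))         # single precision
+
+# Maya-style scale pivot: a suffixed translate op, paired with its inverse
+# as the most-local entry of xformOpOrder.
+xf.AddTranslateOp(opSuffix="scalePivot").Set(Gf.Vec3d(0, 1, 0))
+xf.AddScaleOp().Set(Gf.Vec3f(2, 2, 2))
+xf.AddTranslateOp(opSuffix="scalePivot", isInverseOp=True)
+
+print(xf.GetXformOpOrderAttr().Get())
+# Expected, per the encoding described above:
+# [xformOp:translate, xformOp:rotateXYZ, xformOp:translate:scalePivot,
+#  xformOp:scale, !invert!xformOp:translate:scalePivot]
+```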
+
+class Scope "Scope" (
+ doc = """Scope is the simplest grouping primitive, and does not carry the
+ baggage of transformability. Note that transforms should inherit down
+ through a Scope successfully - it is just a guaranteed no-op from a
+ transformability perspective."""
+)
+{
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeom documentation on imageable purpose for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+}
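+
+A small sketch of the pass-through behavior described above, assuming the pxr
+Python bindings; the paths are illustrative:
+
+```python
+from pxr import Usd, UsdGeom, Gf
+
+stage = Usd.Stage.CreateInMemory()
+root = UsdGeom.Xform.Define(stage, "/Root")
+root.AddTranslateOp().Set(Gf.Vec3d(5, 0, 0))
+
+UsdGeom.Scope.Define(stage, "/Root/Group")            # carries no xformOps
+cube = UsdGeom.Cube.Define(stage, "/Root/Group/Cube")
+
+# The Scope contributes nothing; the cube still inherits /Root's translate.
+cache = UsdGeom.XformCache(Usd.TimeCode.Default())
+print(cache.GetLocalToWorldTransform(cube.GetPrim()))
+```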
+
+class Xform "Xform" (
+ doc = "Concrete prim schema for a transform, which implements Xformable "
+)
+{
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeom documentation on imageable purpose for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class "Boundable" (
+ doc = """Boundable introduces the ability for a prim to persistently
+    cache a rectilinear, local-space extent.
+
+    Why Extent and not Bounds?
+ Boundable introduces the notion of \"extent\", which is a cached computation
+ of a prim's local-space 3D range for its resolved attributes at the
+ layer and time in which extent is authored. We have found that with
+ composed scene description, attempting to cache pre-computed bounds at
+ interior prims in a scene graph is very fragile, given the ease with which
+ one can author a single attribute in a stronger layer that can invalidate
+ many authored caches - or with which a re-published, referenced asset can
+ do the same.
+
+ Therefore, we limit to precomputing (generally) leaf-prim extent, which
+ avoids the need to read in large point arrays to compute bounds, and
+ provides UsdGeomBBoxCache the means to efficiently compute and
+ (session-only) cache intermediate bounds. You are free to compute and
+ author intermediate bounds into your scenes, of course, which may work
+ well if you have sufficient locks on your pipeline to guarantee that once
+ authored, the geometry and transforms upon which they are based will
+ remain unchanged, or if accuracy of the bounds is not an ironclad
+ requisite.
+
+ When intermediate bounds are authored on Boundable parents, the child prims
+ will be pruned from BBox computation; the authored extent is expected to
+ incorporate all child bounds."""
+)
+{
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeom documentation on imageable purpose for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
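+
+A sketch of the recommended workflow: compute extent via the registered
+extent function and author it on the prim. This assumes the
+ComputeExtentFromPlugins static method exposed by recent USD Python bindings;
+the prim path is illustrative:
+
+```python
+from pxr import Usd, UsdGeom
+
+stage = Usd.Stage.CreateInMemory()
+sphere = UsdGeom.Sphere.Define(stage, "/World/Ball")  # illustrative prim
+sphere.GetRadiusAttr().Set(3.0)
+
+boundable = UsdGeom.Boundable(sphere.GetPrim())
+# Invoke the registered ComputeExtentFunction for this prim type and persist
+# the result, so bounds computation need not load or recompute the geometry.
+extent = UsdGeom.Boundable.ComputeExtentFromPlugins(boundable,
+                                                    Usd.TimeCode.Default())
+if extent is not None:
+    boundable.GetExtentAttr().Set(extent)   # expect [(-3,-3,-3), (3,3,3)]
+```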
+
+class "Gprim" (
+ doc = '''Base class for all geometric primitives.
+
+ Gprim encodes basic graphical properties such as doubleSided and
+ orientation, and provides primvars for "display color" and "display
+ opacity" that travel with geometry to be used as shader overrides. '''
+)
+{
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+        Please see the UsdGeom discussion of winding order for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeom documentation on imageable purpose for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
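+
+A sketch of authoring the display primvars and doubleSided flag described
+above via the Python bindings; the prim path is illustrative:
+
+```python
+from pxr import Usd, UsdGeom, Gf
+
+stage = Usd.Stage.CreateInMemory()
+cube = UsdGeom.Cube.Define(stage, "/World/Box")       # illustrative prim
+
+# displayColor and displayOpacity are primvars, so they carry an
+# interpolation and can act as shader overrides downstream.
+color = cube.CreateDisplayColorPrimvar(UsdGeom.Tokens.constant)
+color.Set([Gf.Vec3f(0.9, 0.2, 0.1)])
+opacity = cube.CreateDisplayOpacityPrimvar(UsdGeom.Tokens.constant)
+opacity.Set([0.5])
+
+# Ask renderers to skip backface culling for this gprim.
+cube.CreateDoubleSidedAttr(True)
+```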
+
+class Cube "Cube" (
+ doc = """Defines a primitive rectilinear cube centered at the origin.
+
+ The fallback values for Cube, Sphere, Cone, and Cylinder are set so that
+ they all pack into the same volume/bounds."""
+)
+{
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent = [(-1, -1, -1), (1, 1, 1)] (
+ doc = """Extent is re-defined on Cube only to provide a fallback value.
+ \\sa UsdGeomGprim::GetExtentAttr()."""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+        Please see the UsdGeom discussion of winding order for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeom documentation on imageable purpose for more detail about how
+ purpose is computed and used."""
+ )
+ double size = 2 (
+ doc = """Indicates the length of each edge of the cube. If you
+ author size you must also author extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
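+
+Because authoring size also requires authoring extent, a small Python sketch
+that keeps the two in sync; the prim path is illustrative:
+
+```python
+from pxr import Usd, UsdGeom, Gf
+
+stage = Usd.Stage.CreateInMemory()
+cube = UsdGeom.Cube.Define(stage, "/World/Box")       # illustrative prim
+
+size = 4.0
+half = size / 2.0
+cube.GetSizeAttr().Set(size)
+# Keep the cached extent consistent with the authored size.
+cube.GetExtentAttr().Set([Gf.Vec3f(-half, -half, -half),
+                          Gf.Vec3f(half, half, half)])
+```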
+
+class Sphere "Sphere" (
+ doc = """Defines a primitive sphere centered at the origin.
+
+ The fallback values for Cube, Sphere, Cone, and Cylinder are set so that
+ they all pack into the same volume/bounds."""
+)
+{
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent = [(-1, -1, -1), (1, 1, 1)] (
+ doc = """Extent is re-defined on Sphere only to provide a fallback
+ value. \\sa UsdGeomGprim::GetExtentAttr()."""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+        Please see the UsdGeom discussion of winding order for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeom documentation on imageable purpose for more detail about how
+ purpose is computed and used."""
+ )
+ double radius = 1 (
+ doc = """Indicates the sphere's radius. If you
+ author radius you must also author extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class Cylinder "Cylinder" (
+ doc = """Defines a primitive cylinder with closed ends, centered at the
+ origin, whose spine is along the specified axis.
+
+ The fallback values for Cube, Sphere, Cone, and Cylinder are set so that
+ they all pack into the same volume/bounds."""
+)
+{
+ uniform token axis = "Z" (
+ allowedTokens = ["X", "Y", "Z"]
+ doc = "The axis along which the spine of the cylinder is aligned"
+ )
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent = [(-1, -1, -1), (1, 1, 1)] (
+ doc = """Extent is re-defined on Cylinder only to provide a fallback
+ value. \\sa UsdGeomGprim::GetExtentAttr()."""
+ )
+ double height = 2 (
+ doc = """The size of the cylinder's spine along the specified
+ axis. If you author height you must also author extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+        Please see the UsdGeom discussion of winding order for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeom documentation on imageable purpose for more detail about how
+ purpose is computed and used."""
+ )
+ double radius = 1 (
+ doc = """The radius of the cylinder. If you author radius
+ you must also author extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
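+
+A sketch of authoring a non-default cylinder together with a matching extent
+derived from its axis, height, and radius; the helper function is
+illustrative, not part of UsdGeom:
+
+```python
+from pxr import Usd, UsdGeom, Gf
+
+def cylinder_extent(radius, height, axis):
+    """Axis-aligned extent for a closed cylinder whose spine is `axis`."""
+    e = [radius, radius, radius]
+    e["XYZ".index(axis)] = height / 2.0
+    return [Gf.Vec3f(-e[0], -e[1], -e[2]), Gf.Vec3f(e[0], e[1], e[2])]
+
+stage = Usd.Stage.CreateInMemory()
+cyl = UsdGeom.Cylinder.Define(stage, "/World/Pipe")   # illustrative prim
+cyl.GetAxisAttr().Set("Y")
+cyl.GetHeightAttr().Set(4.0)
+cyl.GetRadiusAttr().Set(0.5)
+cyl.GetExtentAttr().Set(cylinder_extent(0.5, 4.0, "Y"))
+```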
+
+class Capsule "Capsule" (
+ doc = """Defines a primitive capsule, i.e. a cylinder capped by two half
+ spheres, centered at the origin, whose spine is along the specified
+ axis.
+ The spherical cap heights (sagitta) of the two endcaps are a function of
+ the relative radii of the endcaps, such that cylinder tangent and sphere
+ tangent are coincident and maintain C1 continuity."""
+)
+{
+ uniform token axis = "Z" (
+ allowedTokens = ["X", "Y", "Z"]
+ doc = "The axis along which the spine of the capsule is aligned"
+ )
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent = [(-0.5, -0.5, -1), (0.5, 0.5, 1)] (
+ doc = """Extent is re-defined on Capsule only to provide a fallback
+ value. \\sa UsdGeomGprim::GetExtentAttr()."""
+ )
+ double height = 1 (
+ doc = """The length of the capsule's spine along the specified
+ axis excluding the size of the two half spheres, i.e.
+ the length of the cylinder portion of the capsule.
+ If you author height you must also author extent.
+ \\sa GetExtentAttr()"""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+        Please see the UsdGeom discussion of winding order for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeom documentation on imageable purpose for more detail about how
+ purpose is computed and used."""
+ )
+ double radius = 0.5 (
+ doc = """The radius of the capsule. If you
+ author radius you must also author extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class Cone "Cone" (
+ doc = """Defines a primitive cone, centered at the origin, whose spine
+ is along the specified axis, with the apex of the cone pointing
+ in the direction of the positive axis.
+
+ The fallback values for Cube, Sphere, Cone, and Cylinder are set so that
+ they all pack into the same volume/bounds."""
+)
+{
+ uniform token axis = "Z" (
+ allowedTokens = ["X", "Y", "Z"]
+ doc = "The axis along which the spine of the cone is aligned"
+ )
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent = [(-1, -1, -1), (1, 1, 1)] (
+ doc = """Extent is re-defined on Cone only to provide a fallback
+ value. \\sa UsdGeomGprim::GetExtentAttr()."""
+ )
+ double height = 2 (
+ doc = """The length of the cone's spine along the specified
+ axis. If you author height you must also author extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+        Please see the UsdGeom discussion of winding order for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeom documentation on imageable purpose for more detail about how
+ purpose is computed and used."""
+ )
+ double radius = 1 (
+ doc = """The radius of the cone. If you
+ author radius you must also author extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class Cylinder_1 "Cylinder_1" (
+ doc = """Defines a primitive cylinder with closed ends, centered at the
+ origin, whose spine is along the specified axis, with a pair of radii
+ describing the size of the end points.
+
+ The fallback values for Cube, Sphere, Cone, and Cylinder are set so that
+ they all pack into the same volume/bounds."""
+)
+{
+ uniform token axis = "Z" (
+ allowedTokens = ["X", "Y", "Z"]
+ doc = "The axis along which the spine of the cylinder is aligned"
+ )
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent = [(-1, -1, -1), (1, 1, 1)] (
+ doc = """Extent is re-defined on Cylinder only to provide a fallback
+ value. \\sa UsdGeomGprim::GetExtentAttr()."""
+ )
+ double height = 2 (
+ doc = """The length of the cylinder's spine along the specified
+ axis. If you author height you must also author extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+        Please see the UsdGeom discussion of winding order for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ double radiusBottom = 1 (
+        doc = """The radius of the bottom of the cylinder - i.e. the face
+        located along the negative axis. If you author
+        radiusBottom you must also author extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ double radiusTop = 1 (
+ doc = """The radius of the top of the cylinder - i.e. the face located
+ along the positive axis. If you author radiusTop you must also
+ author extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
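+
+# Illustrative sketch (comments only, not part of the generated schema): a
+# Cylinder_1 with a non-default height; as the docs above state, authoring
+# height or a radius also requires authoring a matching extent (spine along Z
+# here). Prim name and values are hypothetical.
+#
+#     def Cylinder_1 "ExampleCylinder"
+#     {
+#         double height = 4
+#         double radiusTop = 1
+#         double radiusBottom = 1
+#         float3[] extent = [(-1, -1, -2), (1, 1, 2)]
+#     }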
+
+class Capsule_1 "Capsule_1" (
+ doc = """Defines a primitive capsule, i.e. a cylinder capped by two half
+ spheres, with potentially different radii, centered at the origin, and whose
+ spine is along the specified axis.
+ The spherical cap heights (sagitta) of the two endcaps are a function of
+ the relative radii of the endcaps, such that cylinder tangent and sphere
+ tangent are coincident and maintain C1 continuity."""
+)
+{
+ uniform token axis = "Z" (
+ allowedTokens = ["X", "Y", "Z"]
+ doc = "The axis along which the spine of the capsule is aligned"
+ )
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent = [(-0.5, -0.5, -1), (0.5, 0.5, 1)] (
+ doc = """Extent is re-defined on Capsule only to provide a fallback
+ value. \\sa UsdGeomGprim::GetExtentAttr()."""
+ )
+ double height = 1 (
+ doc = """The length of the capsule's spine along the specified
+ axis excluding the size of the two half spheres, i.e.
+ the length of the cylinder portion of the capsule.
+ If you author height you must also author extent.
+ \\sa GetExtentAttr()"""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+ Please see for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ double radiusBottom = 0.5 (
+ doc = """The radius of the capping sphere at the bottom of the capsule -
+ i.e. the sphere located in the direction of the negative axis. If
+        you author radiusBottom you must also author extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ double radiusTop = 0.5 (
+ doc = """The radius of the capping sphere at the top of the capsule -
+ i.e. the sphere in the direction of the positive axis. If you
+        author radiusTop you must also author extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
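+
+# Illustrative sketch (comments only, not part of the generated schema): a
+# Capsule_1 with differing end radii. The extent spans the cylinder portion
+# (height) plus each capping sphere's radius along the Z spine. Prim name and
+# values are hypothetical.
+#
+#     def Capsule_1 "ExampleCapsule"
+#     {
+#         double height = 2
+#         double radiusBottom = 0.5
+#         double radiusTop = 0.25
+#         float3[] extent = [(-0.5, -0.5, -1.5), (0.5, 0.5, 1.25)]
+#     }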
+
+class Plane "Plane" (
+ doc = """Defines a primitive plane, centered at the origin, and is defined by
+ a cardinal axis, width, and length. The plane is double-sided by default.
+
+ The axis of width and length are perpendicular to the plane's axis:
+
+ axis | width | length
+ ----- | ------ | -------
+ X | z-axis | y-axis
+ Y | x-axis | z-axis
+ Z | x-axis | y-axis
+
+ """
+)
+{
+ uniform token axis = "Z" (
+ allowedTokens = ["X", "Y", "Z"]
+ doc = """The axis along which the surface of the plane is aligned. When set
+ to 'Z' the plane is in the xy-plane; when axis is 'X' the plane is in
+ the yz-plane, and when axis is 'Y' the plane is in the xz-plane.
+
+ \\sa UsdGeomGprim::GetAxisAttr()."""
+ )
+ uniform bool doubleSided = 1 (
+ doc = """Planes are double-sided by default. Clients may also support
+ single-sided planes.
+
+ \\sa UsdGeomGprim::GetDoubleSidedAttr()"""
+ )
+ float3[] extent = [(-1, -1, 0), (1, 1, 0)] (
+ doc = """Extent is re-defined on Plane only to provide a fallback
+ value. \\sa UsdGeomGprim::GetExtentAttr()."""
+ )
+ double length = 2 (
+ doc = """The length of the plane, which aligns to the y-axis when axis is
+ 'Z' or 'X', or to the z-axis when axis is 'Y'. If you author length
+ you must also author extent.
+
+ \\sa UsdGeomGprim::GetExtentAttr()"""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+ Please see for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ double width = 2 (
+ doc = """The width of the plane, which aligns to the x-axis when axis is
+ 'Z' or 'Y', or to the z-axis when axis is 'X'. If you author width
+ you must also author extent.
+
+ \\sa UsdGeomGprim::GetExtentAttr()"""
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
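+
+# Illustrative sketch (comments only, not part of the generated schema): a
+# Plane with axis "Y", so width maps to the x-axis and length to the z-axis per
+# the table above, with extent authored to match. Prim name and values are
+# hypothetical.
+#
+#     def Plane "ExamplePlane"
+#     {
+#         uniform token axis = "Y"
+#         double width = 4
+#         double length = 6
+#         float3[] extent = [(-2, 0, -3), (2, 0, 3)]
+#     }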
+
+class "PointBased" (
+ doc = """Base class for all UsdGeomGprims that possess points,
+ providing common attributes such as normals and velocities."""
+)
+{
+ vector3f[] accelerations (
+ doc = """If provided, 'accelerations' should be used with
+ velocities to compute positions between samples for the 'points'
+ attribute rather than interpolating between neighboring 'points'
+ samples. Acceleration is measured in position units per second-squared.
+ To convert to position units per squared UsdTimeCode, divide by the
+ square of UsdStage::GetTimeCodesPerSecond()."""
+ )
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ normal3f[] normals (
+ doc = """Provide an object-space orientation for individual points,
+ which, depending on subclass, may define a surface, curve, or free
+ points. Note that 'normals' should not be authored on any Mesh that
+ is subdivided, since the subdivision algorithm will define its own
+ normals. 'normals' is not a generic primvar, but the number of elements
+ in this attribute will be determined by its 'interpolation'. See
+ . If 'normals' and 'primvars:normals'
+ are both specified, the latter has precedence."""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+ Please see for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ point3f[] points (
+ doc = """The primary geometry attribute for all PointBased
+ primitives, describes points in (local) space."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ vector3f[] velocities (
+        doc = """If provided, 'velocities' should be used by renderers to
+        compute positions between samples for the 'points' attribute, rather
+ than interpolating between neighboring 'points' samples. This is the
+ only reasonable means of computing motion blur for topologically
+ varying PointBased primitives. It follows that the length of each
+ 'velocities' sample must match the length of the corresponding
+ 'points' sample. Velocity is measured in position units per second,
+ as per most simulation software. To convert to position units per
+ UsdTimeCode, divide by UsdStage::GetTimeCodesPerSecond().
+
+ See also ."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
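+
+# Illustrative sketch (comments only, not part of the generated schema): how a
+# renderer would use velocities to extrapolate positions between samples,
+# assuming a concrete PointBased subclass (e.g. Points, defined elsewhere in
+# UsdGeom) and a stage with timeCodesPerSecond = 24. Prim name and values are
+# hypothetical.
+#
+#     def Points "ExamplePoints"
+#     {
+#         point3f[] points.timeSamples = { 1: [(0, 0, 0)] }
+#         vector3f[] velocities.timeSamples = { 1: [(24, 0, 0)] }
+#     }
+#
+# Position at timeCode 1.5: (0, 0, 0) + (24, 0, 0) * (1.5 - 1) / 24 = (0.5, 0, 0).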
+
+class Mesh "Mesh" (
+ doc = """Encodes a mesh with optional subdivision properties and features.
+
+ As a point-based primitive, meshes are defined in terms of points that
+ are connected into edges and faces. Many references to meshes use the
+ term 'vertex' in place of or interchangeably with 'points', while some
+ use 'vertex' to refer to the 'face-vertices' that define a face. To
+ avoid confusion, the term 'vertex' is intentionally avoided in favor of
+ 'points' or 'face-vertices'.
+
+ The connectivity between points, edges and faces is encoded using a
+ common minimal topological description of the faces of the mesh. Each
+ face is defined by a set of face-vertices using indices into the Mesh's
+ _points_ array (inherited from UsdGeomPointBased) and laid out in a
+ single linear _faceVertexIndices_ array for efficiency. A companion
+ _faceVertexCounts_ array provides, for each face, the number of
+ consecutive face-vertices in _faceVertexIndices_ that define the face.
+ No additional connectivity information is required or constructed, so
+ no adjacency or neighborhood queries are available.
+
+ A key property of this mesh schema is that it encodes both subdivision
+ surfaces and simpler polygonal meshes. This is achieved by varying the
+ _subdivisionScheme_ attribute, which is set to specify Catmull-Clark
+ subdivision by default, so polygonal meshes must always be explicitly
+ declared. The available subdivision schemes and additional subdivision
+ features encoded in optional attributes conform to the feature set of
+ OpenSubdiv
+ (https://graphics.pixar.com/opensubdiv/docs/subdivision_surfaces.html).
+
+ \\anchor UsdGeom_Mesh_Primvars
+ __A Note About Primvars__
+
+ The following list clarifies the number of elements for and the
+ interpolation behavior of the different primvar interpolation types
+ for meshes:
+
+ - __constant__: One element for the entire mesh; no interpolation.
+ - __uniform__: One element for each face of the mesh; elements are
+ typically not interpolated but are inherited by other faces derived
+ from a given face (via subdivision, tessellation, etc.).
+ - __varying__: One element for each point of the mesh;
+ interpolation of point data is always linear.
+ - __vertex__: One element for each point of the mesh;
+ interpolation of point data is applied according to the
+ _subdivisionScheme_ attribute.
+ - __faceVarying__: One element for each of the face-vertices that
+ define the mesh topology; interpolation of face-vertex data may
+ be smooth or linear, according to the _subdivisionScheme_ and
+ _faceVaryingLinearInterpolation_ attributes.
+
+ Primvar interpolation types and related utilities are described more
+ generally in \\ref Usd_InterpolationVals.
+
+ \\anchor UsdGeom_Mesh_Normals
+ __A Note About Normals__
+
+ Normals should not be authored on a subdivision mesh, since subdivision
+ algorithms define their own normals. They should only be authored for
+ polygonal meshes (_subdivisionScheme_ = \"none\").
+
+ The _normals_ attribute inherited from UsdGeomPointBased is not a generic
+ primvar, but the number of elements in this attribute will be determined by
+ its _interpolation_. See .
+ If _normals_ and _primvars:normals_ are both specified, the latter has
+ precedence. If a polygonal mesh specifies __neither__ _normals_ nor
+ _primvars:normals_, then it should be treated and rendered as faceted,
+ with no attempt to compute smooth normals.
+
+ The normals generated for smooth subdivision schemes, e.g. Catmull-Clark
+ and Loop, will likewise be smooth, but others, e.g. Bilinear, may be
+ discontinuous between faces and/or within non-planar irregular faces."""
+)
+{
+ vector3f[] accelerations (
+ doc = """If provided, 'accelerations' should be used with
+ velocities to compute positions between samples for the 'points'
+ attribute rather than interpolating between neighboring 'points'
+ samples. Acceleration is measured in position units per second-squared.
+ To convert to position units per squared UsdTimeCode, divide by the
+ square of UsdStage::GetTimeCodesPerSecond()."""
+ )
+ int[] cornerIndices = [] (
+ doc = """The indices of points for which a corresponding sharpness
+ value is specified in _cornerSharpnesses_ (so the size of this array
+ must match that of _cornerSharpnesses_)."""
+ )
+ float[] cornerSharpnesses = [] (
+ doc = """The sharpness values associated with a corresponding set of
+ points specified in _cornerIndices_ (so the size of this array must
+ match that of _cornerIndices_). Use the constant `SHARPNESS_INFINITE`
+ for a perfectly sharp corner."""
+ )
+ int[] creaseIndices = [] (
+ doc = """The indices of points grouped into sets of successive pairs
+ that identify edges to be creased. The size of this array must be
+ equal to the sum of all elements of the _creaseLengths_ attribute."""
+ )
+ int[] creaseLengths = [] (
+ doc = """The length of this array specifies the number of creases
+ (sets of adjacent sharpened edges) on the mesh. Each element gives
+ the number of points of each crease, whose indices are successively
+ laid out in the _creaseIndices_ attribute. Since each crease must
+ be at least one edge long, each element of this array must be at
+ least two."""
+ )
+ float[] creaseSharpnesses = [] (
+ doc = """The per-crease or per-edge sharpness values for all creases.
+ Since _creaseLengths_ encodes the number of points in each crease,
+ the number of elements in this array will be either len(creaseLengths)
+ or the sum over all X of (creaseLengths[X] - 1). Note that while
+ the RI spec allows each crease to have either a single sharpness
+ or a value per-edge, USD will encode either a single sharpness
+ per crease on a mesh, or sharpnesses for all edges making up
+ the creases on a mesh. Use the constant `SHARPNESS_INFINITE` for a
+ perfectly sharp crease."""
+ )
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ token faceVaryingLinearInterpolation = "cornersPlus1" (
+ allowedTokens = ["none", "cornersOnly", "cornersPlus1", "cornersPlus2", "boundaries", "all"]
+ doc = '''Specifies how elements of a primvar of interpolation type
+ "faceVarying" are interpolated for subdivision surfaces. Interpolation
+ can be as smooth as a "vertex" primvar or constrained to be linear at
+ features specified by several options. Valid values correspond to
+ choices available in OpenSubdiv:
+
+ - __none__: No linear constraints or sharpening, smooth everywhere
+ - __cornersOnly__: Sharpen corners of discontinuous boundaries only,
+ smooth everywhere else
+ - __cornersPlus1__: The default, same as "cornersOnly" plus additional
+ sharpening at points where three or more distinct face-varying
+ values occur
+ - __cornersPlus2__: Same as "cornersPlus1" plus additional sharpening
+ at points with at least one discontinuous boundary corner or
+ only one discontinuous boundary edge (a dart)
+ - __boundaries__: Piecewise linear along discontinuous boundaries,
+ smooth interior
+ - __all__: Piecewise linear everywhere
+
+ These are illustrated and described in more detail in the OpenSubdiv
+ documentation:
+ https://graphics.pixar.com/opensubdiv/docs/subdivision_surfaces.html#face-varying-interpolation-rules'''
+ )
+ int[] faceVertexCounts (
+ doc = """Provides the number of vertices in each face of the mesh,
+ which is also the number of consecutive indices in _faceVertexIndices_
+ that define the face. The length of this attribute is the number of
+ faces in the mesh. If this attribute has more than
+ one timeSample, the mesh is considered to be topologically varying."""
+ )
+ int[] faceVertexIndices (
+ doc = """Flat list of the index (into the _points_ attribute) of each
+ vertex of each face in the mesh. If this attribute has more than
+ one timeSample, the mesh is considered to be topologically varying."""
+ )
+ int[] holeIndices = [] (
+ doc = """The indices of all faces that should be treated as holes,
+ i.e. made invisible. This is traditionally a feature of subdivision
+ surfaces and not generally applied to polygonal meshes."""
+ )
+ token interpolateBoundary = "edgeAndCorner" (
+ allowedTokens = ["none", "edgeOnly", "edgeAndCorner"]
+ doc = '''Specifies how subdivision is applied for faces adjacent to
+ boundary edges and boundary points. Valid values correspond to choices
+ available in OpenSubdiv:
+
+ - __none__: No boundary interpolation is applied and boundary faces are
+ effectively treated as holes
+ - __edgeOnly__: A sequence of boundary edges defines a smooth curve to
+ which the edges of subdivided boundary faces converge
+ - __edgeAndCorner__: The default, similar to "edgeOnly" but the smooth
+ boundary curve is made sharp at corner points
+
+ These are illustrated and described in more detail in the OpenSubdiv
+ documentation:
+ https://graphics.pixar.com/opensubdiv/docs/subdivision_surfaces.html#boundary-interpolation-rules'''
+ )
+ normal3f[] normals (
+ doc = """Provide an object-space orientation for individual points,
+ which, depending on subclass, may define a surface, curve, or free
+ points. Note that 'normals' should not be authored on any Mesh that
+ is subdivided, since the subdivision algorithm will define its own
+ normals. 'normals' is not a generic primvar, but the number of elements
+ in this attribute will be determined by its 'interpolation'. See
+ . If 'normals' and 'primvars:normals'
+ are both specified, the latter has precedence."""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+ Please see for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ point3f[] points (
+ doc = """The primary geometry attribute for all PointBased
+ primitives, describes points in (local) space."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ uniform token subdivisionScheme = "catmullClark" (
+ allowedTokens = ["catmullClark", "loop", "bilinear", "none"]
+ doc = '''The subdivision scheme to be applied to the surface.
+ Valid values are:
+
+ - __catmullClark__: The default, Catmull-Clark subdivision; preferred
+ for quad-dominant meshes (generalizes B-splines); interpolation
+ of point data is smooth (non-linear)
+ - __loop__: Loop subdivision; preferred for purely triangular meshes;
+ interpolation of point data is smooth (non-linear)
+ - __bilinear__: Subdivision reduces all faces to quads (topologically
+ similar to "catmullClark"); interpolation of point data is bilinear
+ - __none__: No subdivision, i.e. a simple polygonal mesh; interpolation
+ of point data is linear
+
+ Polygonal meshes are typically lighter weight and faster to render,
+ depending on renderer and render mode. Use of "bilinear" will produce
+ a similar shape to a polygonal mesh and may offer additional guarantees
+ of watertightness and additional subdivision features (e.g. holes) but
+ may also not respect authored normals.'''
+ )
+ token triangleSubdivisionRule = "catmullClark" (
+ allowedTokens = ["catmullClark", "smooth"]
+ doc = '''Specifies an option to the subdivision rules for the
+ Catmull-Clark scheme to try and improve undesirable artifacts when
+ subdividing triangles. Valid values are "catmullClark" for the
+ standard rules (the default) and "smooth" for the improvement.
+
+ See https://graphics.pixar.com/opensubdiv/docs/subdivision_surfaces.html#triangle-subdivision-rule'''
+ )
+ vector3f[] velocities (
+        doc = """If provided, 'velocities' should be used by renderers to
+        compute positions between samples for the 'points' attribute, rather
+ than interpolating between neighboring 'points' samples. This is the
+ only reasonable means of computing motion blur for topologically
+ varying PointBased primitives. It follows that the length of each
+ 'velocities' sample must match the length of the corresponding
+ 'points' sample. Velocity is measured in position units per second,
+ as per most simulation software. To convert to position units per
+ UsdTimeCode, divide by UsdStage::GetTimeCodesPerSecond().
+
+ See also ."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
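+
+# Illustrative sketch (comments only, not part of the generated schema): a
+# minimal polygonal quad. Because subdivisionScheme falls back to
+# "catmullClark", a plain polygonal mesh must explicitly author it as "none".
+# Prim name and values are hypothetical.
+#
+#     def Mesh "ExampleQuad"
+#     {
+#         int[] faceVertexCounts = [4]
+#         int[] faceVertexIndices = [0, 1, 2, 3]
+#         point3f[] points = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]
+#         uniform token subdivisionScheme = "none"
+#         float3[] extent = [(0, 0, 0), (1, 1, 0)]
+#     }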
+
+class TetMesh "TetMesh" (
+ doc = """Encodes a tetrahedral mesh. A tetrahedral mesh is defined as a set of
+ tetrahedra. Each tetrahedron is defined by a set of 4 points, with the
+ triangles of the tetrahedron determined from these 4 points as described in
+ the tetVertexIndices attribute description. The mesh surface faces
+ are encoded as triangles. Surface faces must be provided for consumers
+ that need to do surface calculations, such as renderers or consumers using
+ physics attachments. Both tetrahedra and surface face definitions use
+ indices into the TetMesh's points attribute, inherited from
+ UsdGeomPointBased."""
+)
+{
+ vector3f[] accelerations (
+ doc = """If provided, 'accelerations' should be used with
+ velocities to compute positions between samples for the 'points'
+ attribute rather than interpolating between neighboring 'points'
+ samples. Acceleration is measured in position units per second-squared.
+ To convert to position units per squared UsdTimeCode, divide by the
+ square of UsdStage::GetTimeCodesPerSecond()."""
+ )
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ normal3f[] normals (
+ doc = """Provide an object-space orientation for individual points,
+ which, depending on subclass, may define a surface, curve, or free
+ points. Note that 'normals' should not be authored on any Mesh that
+ is subdivided, since the subdivision algorithm will define its own
+ normals. 'normals' is not a generic primvar, but the number of elements
+ in this attribute will be determined by its 'interpolation'. See
+ . If 'normals' and 'primvars:normals'
+ are both specified, the latter has precedence."""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+ Please see for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ point3f[] points (
+ doc = """The primary geometry attribute for all PointBased
+ primitives, describes points in (local) space."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ int3[] surfaceFaceVertexIndices (
+        doc = """surfaceFaceVertexIndices defines the triangle surface face
+        indices with respect to the points of the tet mesh surface. Again,
+        the orientation attribute inherited from UsdGeomGprim should be
+        set accordingly. The orientation for faces of tetrahedra and
+        surface faces must match."""
+ )
+ int4[] tetVertexIndices (
+ doc = '''Flat list of the index (into the points attribute) of
+ each vertex of each tetrahedron in the mesh. Each int4 corresponds to the
+ indices of a single tetrahedron. Users should set the orientation
+        attribute of UsdGeomGprim accordingly. That is, if the orientation
+ is "rightHanded", the CCW face ordering of a tetrahedron is
+ [123],[032],[013],[021] with respect to the int4. This results in the
+ normals facing outward from the center of the tetrahedron. The following
+ diagram shows the face ordering of an unwrapped tetrahedron with
+ "rightHanded" orientation.
+
+ \\image html USDTetMeshRightHanded.svg
+
+ If the orientation attribute is set to "leftHanded" the face
+ ordering of the tetrahedron is [321],[230],[310],[120] and the
+ leftHanded CW face normals point outward from the center of the
+ tetrahedron. The following diagram shows the face ordering of an
+ unwrapped tetrahedron with "leftHanded" orientation.
+
+ \\image html USDTetMeshLeftHanded.svg
+
+ Setting the orientation attribute to align with the
+ ordering of the int4 for the tetrahedrons is the responsibility of the
+ user.'''
+ )
+ vector3f[] velocities (
+        doc = """If provided, 'velocities' should be used by renderers to
+        compute positions between samples for the 'points' attribute, rather
+ than interpolating between neighboring 'points' samples. This is the
+ only reasonable means of computing motion blur for topologically
+ varying PointBased primitives. It follows that the length of each
+ 'velocities' sample must match the length of the corresponding
+ 'points' sample. Velocity is measured in position units per second,
+ as per most simulation software. To convert to position units per
+ UsdTimeCode, divide by UsdStage::GetTimeCodesPerSecond().
+
+ See also ."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
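+
+# Illustrative sketch (comments only, not part of the generated schema): a
+# single right-handed tetrahedron. surfaceFaceVertexIndices lists the four
+# boundary triangles using the CCW ordering [123], [032], [013], [021]
+# described above, so the face normals point outward. Prim name and values are
+# hypothetical.
+#
+#     def TetMesh "ExampleTet"
+#     {
+#         point3f[] points = [(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)]
+#         int4[] tetVertexIndices = [(0, 1, 2, 3)]
+#         int3[] surfaceFaceVertexIndices = [(1, 2, 3), (0, 3, 2), (0, 1, 3), (0, 2, 1)]
+#         float3[] extent = [(0, 0, 0), (1, 1, 1)]
+#     }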
+
+class GeomSubset "GeomSubset" (
+ doc = """Encodes a subset of a piece of geometry (i.e. a UsdGeomImageable)
+ as a set of indices. Currently supports encoding subsets of faces,
+ points, edges, and tetrahedrons.
+
+ To apply to a geometric prim, a GeomSubset prim must be the prim's direct
+ child in namespace, and possess a concrete defining specifier (i.e. def).
+ This restriction makes it easy and efficient to discover subsets of a prim.
+ We might want to relax this restriction if it's common to have multiple
+ families of subsets on a gprim and if it's useful to be able to
+ organize subsets belonging to a family under a common scope. See
+ 'familyName' attribute for more info on defining a family of subsets.
+
+ Note that a GeomSubset isn't an imageable (i.e. doesn't derive from
+ UsdGeomImageable). So, you can't author visibility for it or
+ override its purpose.
+
+ Materials are bound to GeomSubsets just as they are for regular
+ geometry using API available in UsdShade (UsdShadeMaterial::Bind).
+"""
+)
+{
+ uniform token elementType = "face" (
+ allowedTokens = ["face", "point", "edge", "tetrahedron"]
+ doc = """The type of element that the indices target. \"elementType\" can
+ have one of the following values:
+
+        face: Identifies faces on a Gprim's surface. For a
+        UsdGeomMesh, each element of the _indices_ attribute would refer to
+        an element of the Mesh's _faceVertexCounts_ attribute. For a UsdGeomTetMesh,
+ each element of the _indices_ attribute would refer to an element of
+ the Mesh's _surfaceFaceVertexIndices_ attribute.
+
+        point: for any UsdGeomPointBased, each
+ element of the _indices_ attribute would refer to an element of the
+ Mesh's _points_ attribute
+
+        edge: for any UsdGeomMesh, each pair of elements
+ in the _indices_ attribute would refer to a pair of points of the
+ Mesh's _points_ attribute that are connected as an implicit edge on the
+ Mesh. These edges are derived from the Mesh's _faceVertexIndices_
+ attribute. Edges are not currently defined for a UsdGeomTetMesh, but
+ could be derived from all tetrahedron edges or surface face edges only
+ if a specific use-case arises.
+
+        tetrahedron: for any UsdGeomTetMesh, each element of the
+ _indices_ attribute would refer to an element of the TetMesh's
+ _tetVertexIndices_ attribute.
+
"""
+ )
+ uniform token familyName = "" (
+ doc = '''The name of the family of subsets that this subset belongs to.
+ This is optional and is primarily useful when there are multiple
+ families of subsets under a geometric prim. In some cases, this could
+ also be used for achieving proper roundtripping of subset data between
+ DCC apps.
+ When multiple subsets belonging to a prim have the same familyName, they
+ are said to belong to the family. A familyType value can be
+ encoded on the owner of a family of subsets as a token using the static
+ method UsdGeomSubset::SetFamilyType(). "familyType" can have one of the
+ following values:
+
+        UsdGeomTokens->partition: implies that every element of
+ the whole geometry appears exactly once in only one of the subsets
+ belonging to the family.
+
+        UsdGeomTokens->nonOverlapping: an element that appears in one
+ subset may not appear in any other subset belonging to the family, and
+ appears only once in the subset in which it appears.
+
+        UsdGeomTokens->unrestricted: implies that there are no
+ restrictions w.r.t. the membership of elements in the subsets. They
+ could be overlapping and the union of all subsets in the family may
+ not represent the whole.
+
+ \\note The validity of subset data is not enforced by the authoring
+        APIs; however, it can be checked using UsdGeomSubset::ValidateFamily().
+ '''
+ )
+ int[] indices = [] (
+ doc = """The set of indices included in this subset. The indices need not
+ be sorted, but the same index should not appear more than once. Indices
+ are invalid if outside the range [0, elementCount) for the given time on
+ the parent geometric prim."""
+ )
+}
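+
+# Illustrative sketch (comments only, not part of the generated schema): a face
+# subset authored as a direct child of its Mesh, as the docs above require.
+# Prim names, the familyName token, and values are hypothetical.
+#
+#     def Mesh "ExampleMesh"
+#     {
+#         int[] faceVertexCounts = [3, 3]
+#         int[] faceVertexIndices = [0, 1, 2, 0, 2, 3]
+#         point3f[] points = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]
+#
+#         def GeomSubset "frontFace"
+#         {
+#             uniform token elementType = "face"
+#             uniform token familyName = "materialBind"
+#             int[] indices = [0]
+#         }
+#     }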
+
+class NurbsPatch "NurbsPatch" (
+ doc = """Encodes a rational or polynomial non-uniform B-spline
+ surface, with optional trim curves.
+
+ The encoding mostly follows that of RiNuPatch and RiTrimCurve:
+ https://renderman.pixar.com/resources/RenderMan_20/geometricPrimitives.html#rinupatch , with some minor renaming and coalescing for clarity.
+
+ The layout of control vertices in the points attribute inherited
+ from UsdGeomPointBased is row-major with U considered rows, and V columns.
+
+ \\anchor UsdGeom_NurbsPatch_Form
+ NurbsPatch Form
+
+ The authored points, orders, knots, weights, and ranges are all that is
+ required to render the nurbs patch. However, the only way to model closed
+ surfaces with nurbs is to ensure that the first and last control points
+ along the given axis are coincident. Similarly, to ensure the surface is
+ not only closed but also C2 continuous, the last order - 1 control
+ points must be (correspondingly) coincident with the first order - 1
+ control points, and also the spacing of the last corresponding knots
+ must be the same as the first corresponding knots.
+
+ Form is provided as an aid to interchange between modeling and
+ animation applications so that they can robustly identify the intent with
+ which the surface was modelled, and take measures (if they are able) to
+    preserve the continuity/coincidence constraints as the surface may be rigged
+ or deformed.
+ - An open-form NurbsPatch has no continuity constraints.
+ - A closed-form NurbsPatch expects the first and last control points
+ to overlap
+ - A periodic-form NurbsPatch expects the first and last
+ order - 1 control points to overlap.
+
+ Nurbs vs Subdivision Surfaces
+
+ Nurbs are an important modeling primitive in CAD/CAM tools and early
+    computer graphics DCCs. Because they have a natural UV parameterization
+ they easily support \"trim curves\", which allow smooth shapes to be
+ carved out of the surface.
+
+ However, the topology of the patch is always rectangular, and joining two
+ nurbs patches together (especially when they have differing numbers of
+ spans) is difficult to do smoothly. Also, nurbs are not supported by
+ the Ptex texturing technology (http://ptex.us).
+
+ Neither of these limitations are shared by subdivision surfaces; therefore,
+ although they do not subscribe to trim-curve-based shaping, subdivs are
+ often considered a more flexible modeling primitive.
+ """
+)
+{
+ vector3f[] accelerations (
+ doc = """If provided, 'accelerations' should be used with
+ velocities to compute positions between samples for the 'points'
+ attribute rather than interpolating between neighboring 'points'
+ samples. Acceleration is measured in position units per second-squared.
+ To convert to position units per squared UsdTimeCode, divide by the
+ square of UsdStage::GetTimeCodesPerSecond()."""
+ )
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ normal3f[] normals (
+ doc = """Provide an object-space orientation for individual points,
+ which, depending on subclass, may define a surface, curve, or free
+ points. Note that 'normals' should not be authored on any Mesh that
+ is subdivided, since the subdivision algorithm will define its own
+ normals. 'normals' is not a generic primvar, but the number of elements
+ in this attribute will be determined by its 'interpolation'. See
+ . If 'normals' and 'primvars:normals'
+ are both specified, the latter has precedence."""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+ Please see for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ point3f[] points (
+ doc = """The primary geometry attribute for all PointBased
+ primitives, describes points in (local) space."""
+ )
+ double[] pointWeights (
+ doc = """Optionally provides \"w\" components for each control point,
+ thus must be the same length as the points attribute. If authored,
+ the patch will be rational. If unauthored, the patch will be
+ polynomial, i.e. weight for all points is 1.0.
+        \\note Some DCCs pre-weight the points, but in this schema,
+ points are not pre-weighted."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ int[] trimCurve:counts (
+ doc = '''Each element specifies how many curves are present in each
+ "loop" of the trimCurve, and the length of the array determines how
+ many loops the trimCurve contains. The sum of all elements is the
+ total number of curves in the trim, to which we will refer as
+ nCurves in describing the other trim attributes.'''
+ )
+ double[] trimCurve:knots (
+ doc = """Flat list of parametric values for each of the
+ nCurves curves. There will be as many knots as the sum over
+ all elements of vertexCounts plus the sum over all elements of
+ orders."""
+ )
+ int[] trimCurve:orders (
+ doc = "Flat list of orders for each of the nCurves curves."
+ )
+ double3[] trimCurve:points (
+ doc = """Flat list of homogeneous 2D points (u, v, w) that comprise
+ the nCurves curves. The number of points should be equal to the
+ sum over all elements of vertexCounts."""
+ )
+ double2[] trimCurve:ranges (
+ doc = """Flat list of minimum and maximum parametric values
+ (as defined by knots) for each of the nCurves curves."""
+ )
+ int[] trimCurve:vertexCounts (
+ doc = """Flat list of number of vertices for each of the
+ nCurves curves."""
+ )
+ uniform token uForm = "open" (
+ allowedTokens = ["open", "closed", "periodic"]
+ doc = '''Interpret the control grid and knot vectors as representing
+ an open, geometrically closed, or geometrically closed and C2 continuous
+ surface along the U dimension.
+ \\sa "NurbsPatch Form" '''
+ )
+ double[] uKnots (
+ doc = """Knot vector for U direction providing U parameterization.
+ The length of this array must be ( uVertexCount + uOrder ), and its
+ entries must take on monotonically increasing values."""
+ )
+ int uOrder (
+ doc = """Order in the U direction. Order must be positive and is
+ equal to the degree of the polynomial basis to be evaluated, plus 1."""
+ )
+ double2 uRange (
+ doc = """Provides the minimum and maximum parametric values (as defined
+ by uKnots) over which the surface is actually defined. The minimum
+ must be less than the maximum, and greater than or equal to the
+ value of uKnots[uOrder-1]. The maximum must be less than or equal
+ to the last element's value in uKnots."""
+ )
+ int uVertexCount (
+ doc = """Number of vertices in the U direction. Should be at least as
+ large as uOrder."""
+ )
+ vector3f[] velocities (
+ doc = """If provided, 'velocities' should be used by renderers to
+ compute positions between samples for the 'points' attribute, rather
+ than interpolating between neighboring 'points' samples. This is the
+ only reasonable means of computing motion blur for topologically
+ varying PointBased primitives. It follows that the length of each
+ 'velocities' sample must match the length of the corresponding
+ 'points' sample. Velocity is measured in position units per second,
+ as per most simulation software. To convert to position units per
+ UsdTimeCode, divide by UsdStage::GetTimeCodesPerSecond().
+
+ See also ."""
+ )
+ uniform token vForm = "open" (
+ allowedTokens = ["open", "closed", "periodic"]
+ doc = '''Interpret the control grid and knot vectors as representing
+ an open, geometrically closed, or geometrically closed and C2 continuous
+ surface along the V dimension.
+ \\sa "NurbsPatch Form" '''
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ double[] vKnots (
+ doc = """Knot vector for V direction providing V parameterization.
+ The length of this array must be ( vVertexCount + vOrder ), and its
+ entries must take on monotonically increasing values."""
+ )
+ int vOrder (
+ doc = """Order in the V direction. Order must be positive and is
+ equal to the degree of the polynomial basis to be evaluated, plus 1."""
+ )
+ double2 vRange (
+ doc = """Provides the minimum and maximum parametric values (as defined
+ by vKnots) over which the surface is actually defined. The minimum
+ must be less than the maximum, and greater than or equal to the
+ value of vKnots[vOrder-1]. The maximum must be less than or equal
+ to the last element's value in vKnots."""
+ )
+ int vVertexCount (
+ doc = """Number of vertices in the V direction. Should be at least as
+ large as vOrder."""
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
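+ The knot and range constraints documented on NurbsPatch above can be checked
+ with a small sketch (illustrative only, not part of the schema; the helper
+ names are hypothetical, and the same rules apply to vKnots/vRange):
+
+ ```python
+ def valid_u_knots(u_knots, u_vertex_count, u_order):
+     # Length must be (uVertexCount + uOrder); entries take on monotonically
+     # increasing (non-decreasing) values.
+     return (len(u_knots) == u_vertex_count + u_order and
+             all(a <= b for a, b in zip(u_knots, u_knots[1:])))
+
+ def valid_u_range(u_range, u_knots, u_order):
+     # uKnots[uOrder - 1] <= min < max <= uKnots[-1]
+     lo, hi = u_range
+     return u_knots[u_order - 1] <= lo < hi <= u_knots[-1]
+ ```
+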
+class "Curves" (
+ doc = """Base class for UsdGeomBasisCurves, UsdGeomNurbsCurves, and
+ UsdGeomHermiteCurves. The BasisCurves schema is designed to be
+ analogous to offline renderers' notion of batched curves (such as
+ the classical RIB definition via Basis and Curves statements),
+ while the NurbsCurve schema is designed to be analogous to the
+ NURBS curves found in packages like Maya and Houdini while
+ retaining their consistency with the RenderMan specification for
+ NURBS Patches. HermiteCurves are useful for the
+ interchange of animation guides and paths.
+
+ It is safe to use the length of the curve vertex count to derive
+ the number of curves and the number and layout of curve vertices,
+ but this schema should NOT be used to derive the number of curve
+ points. While vertex indices are implicit in all shipped
+ descendant types of this schema, one should not assume that all
+ internal or future shipped schemas will follow this pattern. Be
+ sure to key any indexing behavior off the concrete type, not this
+ abstract type.
+ """
+)
+{
+ vector3f[] accelerations (
+ doc = """If provided, 'accelerations' should be used with
+ velocities to compute positions between samples for the 'points'
+ attribute rather than interpolating between neighboring 'points'
+ samples. Acceleration is measured in position units per second-squared.
+ To convert to position units per squared UsdTimeCode, divide by the
+ square of UsdStage::GetTimeCodesPerSecond()."""
+ )
+ int[] curveVertexCounts (
+ doc = """Curves-derived primitives can represent multiple distinct,
+ potentially disconnected curves. The length of 'curveVertexCounts'
+ gives the number of such curves, and each element describes the
+ number of vertices in the corresponding curve"""
+ )
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ normal3f[] normals (
+ doc = """Provide an object-space orientation for individual points,
+ which, depending on subclass, may define a surface, curve, or free
+ points. Note that 'normals' should not be authored on any Mesh that
+ is subdivided, since the subdivision algorithm will define its own
+ normals. 'normals' is not a generic primvar, but the number of elements
+ in this attribute will be determined by its 'interpolation'. See
+ . If 'normals' and 'primvars:normals'
+ are both specified, the latter has precedence."""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+ Please see for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ point3f[] points (
+ doc = """The primary geometry attribute for all PointBased
+ primitives, describes points in (local) space."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ vector3f[] velocities (
+ doc = """If provided, 'velocities' should be used by renderers to
+ compute positions between samples for the 'points' attribute, rather
+ than interpolating between neighboring 'points' samples. This is the
+ only reasonable means of computing motion blur for topologically
+ varying PointBased primitives. It follows that the length of each
+ 'velocities' sample must match the length of the corresponding
+ 'points' sample. Velocity is measured in position units per second,
+ as per most simulation software. To convert to position units per
+ UsdTimeCode, divide by UsdStage::GetTimeCodesPerSecond().
+
+ See also ."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ float[] widths (
+ doc = """Provides width specification for the curves, whose application
+ will depend on whether the curve is oriented (normals are defined for
+ it), in which case widths are \"ribbon width\", or unoriented, in which
+ case widths are cylinder width. 'widths' is not a generic Primvar,
+ but the number of elements in this attribute will be determined by
+ its 'interpolation'. See . If 'widths'
+ and 'primvars:widths' are both specified, the latter has precedence."""
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
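+ As a small worked example of the unit conversions described for 'velocities'
+ and 'accelerations' above (illustrative only; the 24 timeCodesPerSecond value
+ is an assumption):
+
+ ```python
+ time_codes_per_second = 24.0              # UsdStage::GetTimeCodesPerSecond()
+ velocity = 48.0                           # position units per second
+ acceleration = 96.0                       # position units per second-squared
+
+ # Convert to per-UsdTimeCode quantities as described in the attribute docs.
+ velocity_per_time_code = velocity / time_codes_per_second
+ acceleration_per_time_code_sq = acceleration / time_codes_per_second ** 2
+ assert velocity_per_time_code == 2.0
+ assert acceleration_per_time_code_sq == 96.0 / 576.0
+ ```
+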
+class BasisCurves "BasisCurves" (
+ doc = """BasisCurves are a batched curve representation analogous to the
+ classic RIB definition via Basis and Curves statements. BasisCurves are
+ often used to render dense aggregate geometry like hair or grass.
+
+ A 'matrix' and 'vstep' associated with the basis are used to
+ interpolate the vertices of a cubic BasisCurves. (The basis attribute
+ is unused for linear BasisCurves.)
+
+ A single prim may have many curves whose count is determined implicitly by
+ the length of the curveVertexCounts vector. Each individual curve is
+ composed of one or more segments. Each segment is defined by four vertices
+ for cubic curves and two vertices for linear curves. See the next section
+ for more information on how to map curve vertex counts to segment counts.
+
+ Segment Indexing
+ Interpolating a curve requires knowing how to decompose it into its
+ individual segments.
+
+ The segments of a cubic curve are determined by the vertex count,
+ the wrap (periodicity), and the vstep of the basis. For linear
+ curves, the basis token is ignored and only the vertex count and
+ wrap are needed.
+
+ cubic basis | vstep
+ ------------- | ------
+ bezier | 3
+ catmullRom | 1
+ bspline | 1
+
+ The first segment of a cubic (nonperiodic) curve is always defined by its
+ first four points. The vstep is the increment used to determine what
+ vertex indices define the next segment. For a two segment (nonperiodic)
+ bspline basis curve (vstep = 1), the first segment will be defined by
+ interpolating vertices [0, 1, 2, 3] and the second segment will be defined
+ by [1, 2, 3, 4]. For a two segment bezier basis curve (vstep = 3), the
+ first segment will be defined by interpolating vertices [0, 1, 2, 3] and
+ the second segment will be defined by [3, 4, 5, 6]. If the vstep is not
+ one, then you must take special care to make sure that the number of cvs
+ properly divides by your vstep. (The indices described are relative to
+ the initial vertex index for a batched curve.)
+
+ For periodic curves, at least one of the curve's initial vertices is
+ repeated to close the curve. For cubic curves, the number of vertices
+ repeated is '4 - vstep'. For linear curves, only one vertex is repeated
+ to close the loop.
+
+ Pinned curves are a special case of nonperiodic curves that only affects
+ the behavior of cubic Bspline and Catmull-Rom curves. To evaluate or render
+ pinned curves, a client must effectively add 'phantom points' at the
+ beginning and end of every curve in a batch. These phantom points
+ are injected to ensure that the interpolated curve begins at P[0] and
+ ends at P[n-1].
+
+ For a curve with initial point P[0] and last point P[n-1], the phantom
+ points are defined as:
+ P[-1] = 2 * P[0] - P[1]
+ P[n] = 2 * P[n-1] - P[n-2]
+
+ Pinned cubic curves will (usually) have to be unpacked into the standard
+ nonperiodic representation before rendering. This unpacking can add some
+ additional overhead. However, using pinned curves reduces the amount of
+ data recorded in a scene and (more importantly) better records the
+ authors' intent for interchange.
+
+ \\note The additional phantom points mean that the minimum curve vertex
+ count for cubic bspline and catmullRom curves is 2.
+
+ Linear curve segments are defined by two vertices.
+ A two segment linear curve's first segment would be defined by
+ interpolating vertices [0, 1]. The second segment would be defined by
+ vertices [1, 2]. (Again, for a batched curve, indices are relative to
+ the initial vertex index.)
+
+ When validating curve topology, each renderable entry in the
+ curveVertexCounts vector must pass this check.
+
+ type | wrap | validity
+ ------- | --------------------------- | ----------------
+ linear | nonperiodic | curveVertexCounts[i] > 2
+ linear | periodic | curveVertexCounts[i] > 3
+ cubic | nonperiodic | (curveVertexCounts[i] - 4) % vstep == 0
+ cubic | periodic | (curveVertexCounts[i]) % vstep == 0
+ cubic | pinned (catmullRom/bspline) | (curveVertexCounts[i] - 2) >= 0
+
+ Cubic Vertex Interpolation
+
+ \\image html USDCurveBasisMatrix.png width=750
+
+ Linear Vertex Interpolation
+
+ Linear interpolation is always used on curves of type linear.
+ For a parameter 't' with domain [0, 1], the curve is defined by the equation
+ P0 * (1-t) + P1 * t. t at 0 describes the first point and t at 1 describes
+ the end point.
+
+ Primvar Interpolation
+
+ For cubic curves, primvar data can be either interpolated cubically between
+ vertices or linearly across segments. The corresponding token
+ for cubic interpolation is 'vertex' and for linear interpolation is
+ 'varying'. Per vertex data should be the same size as the number
+ of vertices in your curve. Segment varying data is dependent on the
+ wrap (periodicity) and number of segments in your curve. For linear curves,
+ varying and vertex data would be interpolated the same way. By convention
+ varying is the preferred interpolation because of the association of
+ varying with linear interpolation.
+
+ \\image html USDCurvePrimvars.png
+
+ To convert an entry in the curveVertexCounts vector into a segment count
+ for an individual curve, apply these rules. Sum up all the results in
+ order to compute how many total segments all curves have.
+
+ The following tables describe the expected segment count for the 'i'th
+ curve in a curve batch as well as the entire batch. Python syntax
+ like '[:]' (to describe all members of an array) and 'len(...)'
+ (to describe the length of an array) are used.
+
+ type | wrap | curve segment count | batch segment count
+ ------- | --------------------------- | -------------------------------------- | --------------------------
+ linear | nonperiodic | curveVertexCounts[i] - 1 | sum(curveVertexCounts[:]) - len(curveVertexCounts)
+ linear | periodic | curveVertexCounts[i] | sum(curveVertexCounts[:])
+ cubic | nonperiodic | (curveVertexCounts[i] - 4) / vstep + 1 | sum(curveVertexCounts[:] - 4) / vstep + len(curveVertexCounts)
+ cubic | periodic | curveVertexCounts[i] / vstep | sum(curveVertexCounts[:]) / vstep
+ cubic | pinned (catmullRom/bspline) | (curveVertexCounts[i] - 2) + 1 | sum(curveVertexCounts[:] - 2) + len(curveVertexCounts)
+
+ The following table describes the expected size of varying
+ (linearly interpolated) data, derived from the segment counts computed
+ above.
+
+ wrap | curve varying count | batch varying count
+ ------------------- | ---------------------------- | ------------------------------------------------
+ nonperiodic/pinned | segmentCounts[i] + 1 | sum(segmentCounts[:]) + len(curveVertexCounts)
+ periodic | segmentCounts[i] | sum(segmentCounts[:])
+
+ Both curve types additionally define 'constant' interpolation for the
+ entire prim and 'uniform' interpolation as per curve data.
+
+
+ \\note Take care when providing support for linearly interpolated data for
+ cubic curves. Its shape doesn't provide a one to one mapping with either
+ the number of curves (like 'uniform') or the number of vertices (like
+ 'vertex') and so it is often overlooked. This is the only primitive in
+ UsdGeom (as of this writing) where this is true. For meshes, while they
+ use different interpolation methods, 'varying' and 'vertex' are both
+ specified per point. It's common to assume that curves follow a similar
+ pattern and build in structures and language for per primitive, per
+ element, and per point data only to come upon these arrays that don't
+ quite fit into either of those categories. It is
+ also common to conflate 'varying' with being per segment data and use the
+ segmentCount rules table instead of its neighboring varying data table
+ rules. We suspect that this is because for the common case of
+ nonperiodic cubic curves, both the provided segment count and varying data
+ size formula end with '+ 1'. While debugging, users may look at the double
+ '+ 1' as a mistake and try to remove it. We take this time to enumerate
+ these issues because we've fallen into them before and hope that we save
+ others time in their own implementations.
+
+ As an example of deriving per curve segment and varying primvar data counts from
+ the wrap, type, basis, and curveVertexCount, the following table is provided.
+
+ wrap | type | basis | curveVertexCount | curveSegmentCount | varyingDataCount
+ ------------- | ------- | ------- | ----------------- | ------------------ | -------------------------
+ nonperiodic | linear | N/A | [2 3 2 5] | [1 2 1 4] | [2 3 2 5]
+ nonperiodic | cubic | bezier | [4 7 10 4 7] | [1 2 3 1 2] | [2 3 4 2 3]
+ nonperiodic | cubic | bspline | [5 4 6 7] | [2 1 3 4] | [3 2 4 5]
+ periodic | cubic | bezier | [6 9 6] | [2 3 2] | [2 3 2]
+ periodic | linear | N/A | [3 7] | [3 7] | [3 7]
+
+ Tubes and Ribbons
+
+ The strictest definition of a curve as an infinitely thin wire is not
+ particularly useful for describing production scenes. The additional
+ widths and normals attributes can be used to describe cylindrical
+ tubes and/or flat oriented ribbons.
+
+ Curves with only widths defined are imaged as tubes with radius
+ 'width / 2'. Curves with both widths and normals are imaged as ribbons
+ oriented in the direction of the interpolated normal vectors.
+
+ While not technically UsdGeomPrimvars, widths and normals
+ also have interpolation metadata. It's common for authored widths to have
+ constant, varying, or vertex interpolation
+ (see UsdGeomCurves::GetWidthsInterpolation()). It's common for
+ authored normals to have varying interpolation
+ (see UsdGeomPointBased::GetNormalsInterpolation()).
+
+ \\image html USDCurveHydra.png
+
+ The file used to generate these curves can be found in
+ extras/usd/examples/usdGeomExamples/basisCurves.usda. It's provided
+ as a reference on how to properly image both tubes and ribbons. The first
+ row of curves are linear; the second are cubic bezier. (We aim in future
+ releases of HdSt to fix the discontinuity seen with broken tangents to
+ better match offline renderers like RenderMan.) The yellow and violet
+ cubic curves represent cubic vertex width interpolation for which there is
+ no equivalent for linear curves.
+
+ \\note How did this prim type get its name? This prim is a portmanteau of
+ two different statements in the original RenderMan specification:
+ 'Basis' and 'Curves'.
+"""
+)
+{
+ vector3f[] accelerations (
+ doc = """If provided, 'accelerations' should be used with
+ velocities to compute positions between samples for the 'points'
+ attribute rather than interpolating between neighboring 'points'
+ samples. Acceleration is measured in position units per second-squared.
+ To convert to position units per squared UsdTimeCode, divide by the
+ square of UsdStage::GetTimeCodesPerSecond()."""
+ )
+ uniform token basis = "bezier" (
+ allowedTokens = ["bezier", "bspline", "catmullRom"]
+ doc = """The basis specifies the vstep and matrix used for cubic
+ interpolation. \\note The 'hermite' and 'power' tokens have been
+ removed. We've provided UsdGeomHermiteCurves
+ as an alternative for the 'hermite' basis."""
+ )
+ int[] curveVertexCounts (
+ doc = """Curves-derived primitives can represent multiple distinct,
+ potentially disconnected curves. The length of 'curveVertexCounts'
+ gives the number of such curves, and each element describes the
+ number of vertices in the corresponding curve"""
+ )
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ normal3f[] normals (
+ doc = """Provide an object-space orientation for individual points,
+ which, depending on subclass, may define a surface, curve, or free
+ points. Note that 'normals' should not be authored on any Mesh that
+ is subdivided, since the subdivision algorithm will define its own
+ normals. 'normals' is not a generic primvar, but the number of elements
+ in this attribute will be determined by its 'interpolation'. See
+ . If 'normals' and 'primvars:normals'
+ are both specified, the latter has precedence."""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+ Please see for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ point3f[] points (
+ doc = """The primary geometry attribute for all PointBased
+ primitives, describes points in (local) space."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ uniform token type = "cubic" (
+ allowedTokens = ["linear", "cubic"]
+ doc = """Linear curves interpolate linearly between two vertices.
+ Cubic curves use a basis matrix with four vertices to interpolate a segment."""
+ )
+ vector3f[] velocities (
+ doc = """If provided, 'velocities' should be used by renderers to
+ compute positions between samples for the 'points' attribute, rather
+ than interpolating between neighboring 'points' samples. This is the
+ only reasonable means of computing motion blur for topologically
+ varying PointBased primitives. It follows that the length of each
+ 'velocities' sample must match the length of the corresponding
+ 'points' sample. Velocity is measured in position units per second,
+ as per most simulation software. To convert to position units per
+ UsdTimeCode, divide by UsdStage::GetTimeCodesPerSecond().
+
+ See also ."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ float[] widths (
+ doc = """Provides width specification for the curves, whose application
+ will depend on whether the curve is oriented (normals are defined for
+ it), in which case widths are \"ribbon width\", or unoriented, in which
+ case widths are cylinder width. 'widths' is not a generic Primvar,
+ but the number of elements in this attribute will be determined by
+ its 'interpolation'. See . If 'widths'
+ and 'primvars:widths' are both specified, the latter has precedence."""
+ )
+ uniform token wrap = "nonperiodic" (
+ allowedTokens = ["nonperiodic", "periodic", "pinned"]
+ doc = """If wrap is set to periodic, the curve when rendered will
+ repeat the initial vertices (dependent on the vstep) to close the
+ curve. If wrap is set to 'pinned', phantom points may be created
+ to ensure that the curve interpolation starts at P[0] and ends at P[n-1].
+ """
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
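+ The segment-count, varying-size, and pinned-curve rules documented on
+ BasisCurves above are summarized in the following sketch (illustrative only,
+ not part of the schema; the helper names are hypothetical):
+
+ ```python
+ VSTEP = {"bezier": 3, "bspline": 1, "catmullRom": 1}
+
+ def curve_segment_count(count, curve_type, wrap, basis=None):
+     # Segment count for one entry of curveVertexCounts.
+     if curve_type == "linear":
+         return count if wrap == "periodic" else count - 1
+     vstep = VSTEP[basis]
+     if wrap == "periodic":
+         return count // vstep
+     if wrap == "pinned":                      # bspline / catmullRom only
+         return (count - 2) + 1
+     return (count - 4) // vstep + 1           # nonperiodic cubic
+
+ def varying_size(segments, wrap):
+     # Size of a 'varying' (linearly interpolated) primvar for one curve.
+     return segments if wrap == "periodic" else segments + 1
+
+ def phantom_points(p):
+     # Phantom end points used when evaluating pinned bspline/catmullRom curves;
+     # p is a sequence of vector-valued points (e.g. Gf.Vec3f) for one curve.
+     return 2 * p[0] - p[1], 2 * p[-1] - p[-2]
+
+ # Reproduces the nonperiodic cubic bezier row of the example table above.
+ counts = [4, 7, 10, 4, 7]
+ segments = [curve_segment_count(c, "cubic", "nonperiodic", "bezier") for c in counts]
+ assert segments == [1, 2, 3, 1, 2]
+ assert [varying_size(s, "nonperiodic") for s in segments] == [2, 3, 4, 2, 3]
+ ```
+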
+class NurbsCurves "NurbsCurves" (
+ doc = """This schema is analogous to NURBS Curves in packages like Maya
+ and Houdini, often used for interchange of rigging and modeling curves.
+ Unlike Maya, this curve spec supports batching of multiple curves into a
+ single prim, widths, and normals in the schema. Additionally, we require
+ 'numSegments + 2 * degree + 1' knots (2 more than Maya does). This is to
+ be more consistent with RenderMan's NURBS patch specification.
+
+ To express a periodic curve:
+ - knots[0] = knots[1] - (knots[-2] - knots[-3]);
+ - knots[-1] = knots[-2] + (knots[2] - knots[1]);
+
+ To express a nonperiodic curve:
+ - knots[0] = knots[1];
+ - knots[-1] = knots[-2];
+
+ In spite of these slight differences in the spec, curves generated in Maya
+ should be preserved when roundtripping.
+
+ order and range, when representing a batched NurbsCurve, should be
+ authored one value per curve. knots should be the concatenation of
+ all batched curves."""
+)
+{
+ vector3f[] accelerations (
+ doc = """If provided, 'accelerations' should be used with
+ velocities to compute positions between samples for the 'points'
+ attribute rather than interpolating between neighboring 'points'
+ samples. Acceleration is measured in position units per second-squared.
+ To convert to position units per squared UsdTimeCode, divide by the
+ square of UsdStage::GetTimeCodesPerSecond()."""
+ )
+ int[] curveVertexCounts (
+ doc = """Curves-derived primitives can represent multiple distinct,
+ potentially disconnected curves. The length of 'curveVertexCounts'
+ gives the number of such curves, and each element describes the
+ number of vertices in the corresponding curve"""
+ )
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ double[] knots (
+ doc = """Knot vector providing curve parameterization.
+ The length of the slice of the array for the ith curve
+ must be ( curveVertexCount[i] + order[i] ), and its
+ entries must take on monotonically increasing values."""
+ )
+ normal3f[] normals (
+ doc = """Provide an object-space orientation for individual points,
+ which, depending on subclass, may define a surface, curve, or free
+ points. Note that 'normals' should not be authored on any Mesh that
+ is subdivided, since the subdivision algorithm will define its own
+ normals. 'normals' is not a generic primvar, but the number of elements
+ in this attribute will be determined by its 'interpolation'. See
+ . If 'normals' and 'primvars:normals'
+ are both specified, the latter has precedence."""
+ )
+ int[] order = [] (
+ doc = """Order of the curve. Order must be positive and is
+ equal to the degree of the polynomial basis to be evaluated, plus 1.
+ Its value for the 'i'th curve must be less than or equal to
+ curveVertexCount[i]"""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+ Please see for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ point3f[] points (
+ doc = """The primary geometry attribute for all PointBased
+ primitives, describes points in (local) space."""
+ )
+ double[] pointWeights (
+ doc = """Optionally provides \"w\" components for each control point,
+ thus must be the same length as the points attribute. If authored,
+ the curve will be rational. If unauthored, the curve will be
+ polynomial, i.e. weight for all points is 1.0.
+ \\note Some DCCs pre-weight the points, but in this schema,
+ points are not pre-weighted."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ double2[] ranges (
+ doc = """Provides the minimum and maximum parametric values (as defined
+ by knots) over which the curve is actually defined. The minimum must
+ be less than the maximum, and greater than or equal to the value of the
+ knots['i'th curve slice][order[i]-1]. The maximum must be less
+ than or equal to the last element's value in knots['i'th curve slice].
+\tRange maps to (vmin, vmax) in the RenderMan spec."""
+ )
+ vector3f[] velocities (
+ doc = """If provided, 'velocities' should be used by renderers to
+ compute positions between samples for the 'points' attribute, rather
+ than interpolating between neighboring 'points' samples. This is the
+ only reasonable means of computing motion blur for topologically
+ varying PointBased primitives. It follows that the length of each
+ 'velocities' sample must match the length of the corresponding
+ 'points' sample. Velocity is measured in position units per second,
+ as per most simulation software. To convert to position units per
+ UsdTimeCode, divide by UsdStage::GetTimeCodesPerSecond().
+
+ See also ."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ float[] widths (
+ doc = """Provides width specification for the curves, whose application
+ will depend on whether the curve is oriented (normals are defined for
+ it), in which case widths are \"ribbon width\", or unoriented, in which
+ case widths are cylinder width. 'widths' is not a generic Primvar,
+ but the number of elements in this attribute will be determined by
+ its 'interpolation'. See . If 'widths'
+ and 'primvars:widths' are both specified, the latter has precedence."""
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
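+ The knot-count and end-knot conditions documented on NurbsCurves above,
+ sketched for a single curve (illustrative only, not part of the schema):
+
+ ```python
+ def knot_count(curve_vertex_count, order):
+     # The knot slice for curve i has (curveVertexCounts[i] + order[i]) entries;
+     # for a nonperiodic curve this equals numSegments + 2 * degree + 1,
+     # with degree = order - 1 and numSegments = curveVertexCount - degree.
+     return curve_vertex_count + order
+
+ def is_periodic(knots):
+     return (knots[0] == knots[1] - (knots[-2] - knots[-3]) and
+             knots[-1] == knots[-2] + (knots[2] - knots[1]))
+
+ def is_nonperiodic(knots):
+     return knots[0] == knots[1] and knots[-1] == knots[-2]
+
+ # e.g. a single order-4 (cubic) curve with 7 control vertices needs 11 knots.
+ assert knot_count(7, 4) == 11
+ ```
+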
+class Points "Points" (
+ doc = """Points are analogous to the RiPoints spec.
+
+ Points can be an efficient means of storing and rendering particle
+ effects comprised of thousands or millions of small particles. Points
+ generally receive a single shading sample each, which should take
+ normals into account, if present.
+
+ While not technically UsdGeomPrimvars, the widths and normals also
+ have interpolation metadata. It's common for authored widths and normals
+ to have constant or varying interpolation."""
+)
+{
+ vector3f[] accelerations (
+ doc = """If provided, 'accelerations' should be used with
+ velocities to compute positions between samples for the 'points'
+ attribute rather than interpolating between neighboring 'points'
+ samples. Acceleration is measured in position units per second-squared.
+ To convert to position units per squared UsdTimeCode, divide by the
+ square of UsdStage::GetTimeCodesPerSecond()."""
+ )
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ int64[] ids (
+ doc = """Ids are optional; if authored, the ids array should be the same
+ length as the points array, specifying (at each timesample if
+ point identities are changing) the id of each point. The
+ type is signed intentionally, so that clients can encode some
+ binary state on Id'd points without adding a separate
+ primvar."""
+ )
+ normal3f[] normals (
+ doc = """Provide an object-space orientation for individual points,
+ which, depending on subclass, may define a surface, curve, or free
+ points. Note that 'normals' should not be authored on any Mesh that
+ is subdivided, since the subdivision algorithm will define its own
+ normals. 'normals' is not a generic primvar, but the number of elements
+ in this attribute will be determined by its 'interpolation'. See
+ . If 'normals' and 'primvars:normals'
+ are both specified, the latter has precedence."""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+ Please see for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ point3f[] points (
+ doc = """The primary geometry attribute for all PointBased
+ primitives, describes points in (local) space."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ vector3f[] velocities (
+ doc = """If provided, 'velocities' should be used by renderers to
+ compute positions between samples for the 'points' attribute, rather
+ than interpolating between neighboring 'points' samples. This is the
+ only reasonable means of computing motion blur for topologically
+ varying PointBased primitives. It follows that the length of each
+ 'velocities' sample must match the length of the corresponding
+ 'points' sample. Velocity is measured in position units per second,
+ as per most simulation software. To convert to position units per
+ UsdTimeCode, divide by UsdStage::GetTimeCodesPerSecond().
+
+ See also ."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ float[] widths (
+ doc = """Widths are defined as the diameter of the points, in
+ object space. 'widths' is not a generic Primvar, but
+ the number of elements in this attribute will be determined by
+ its 'interpolation'. See . If
+ 'widths' and 'primvars:widths' are both specified, the latter
+ has precedence."""
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class PointInstancer "PointInstancer" (
+ doc = """Encodes vectorized instancing of multiple, potentially
+ animated, prototypes (object/instance masters), which can be arbitrary
+ prims/subtrees on a UsdStage.
+
+ PointInstancer is a \"multi instancer\", as it allows multiple prototypes
+ to be scattered among its \"points\". We use a UsdRelationship
+ prototypes to identify and order all of the possible prototypes, by
+ targeting the root prim of each prototype. The ordering imparted by
+ relationships associates a zero-based integer with each prototype, and
+ it is these integers we use to identify the prototype of each instance,
+ compactly, and allowing prototypes to be swapped out without needing to
+ reauthor all of the per-instance data.
+
+ The PointInstancer schema is designed to scale to billions of instances,
+ which motivates the choice to split the per-instance transformation into
+ position, (quaternion) orientation, and scales, rather than a
+ 4x4 matrix per-instance. In addition to requiring fewer bytes even if
+ all elements are authored (32 bytes vs 64 for a single-precision 4x4
+ matrix), we can also be selective about which attributes need to animate
+ over time, for substantial data reduction in many cases.
+
+ Note that PointInstancer is not a Gprim, since it is not a graphical
+ primitive by any stretch of the imagination. It is, however,
+ Boundable, since we will sometimes want to treat the entire PointInstancer
+ similarly to a procedural, from the perspective of inclusion or framing.
+
+ Varying Instance Identity over Time
+
+ PointInstancers originating from simulations often have the characteristic
+ that points/instances are \"born\", move around for some time period, and then
+ die (or leave the area of interest). In such cases, billions of instances
+ may be birthed over time, while at any specific time, only a much
+ smaller number are actually alive. To encode this situation efficiently,
+ the simulator may re-use indices in the instance arrays: when a particle
+ dies, its index will be taken over by a new particle that may be birthed in
+ a much different location. This presents challenges both for
+ identity-tracking, and for motion-blur.
+
+ We facilitate identity tracking by providing an optional, animatable
+ ids attribute, that specifies the 64 bit integer ID of the particle
+ at each index, at each point in time. If the simulator keeps monotonically
+ increasing a particle-count each time a new particle is birthed, it will
+ serve perfectly as particle ids.
+
+ We facilitate motion blur for varying-topology particle streams by
+ optionally allowing per-instance velocities and angularVelocities
+ to be authored. If instance transforms are requested at a time between
+ samples and either of the velocity attributes is authored, then we will
+ not attempt to interpolate samples of positions or orientations.
+ If not authored, and the bracketing samples have the same length, then we
+ will interpolate.
+
+ Computing an Instance Transform
+
+ Each instance's transformation is a combination of the SRT affine transform
+ described by its scale, orientation, and position, applied after
+ (i.e. less locally than) the local to parent transformation computed at
+ the root of the prototype it is instancing.
+
+ If your processing of prototype geometry naturally takes into account the
+ transform of the prototype root, then this term can be omitted from the
+ computation of each instance transform, and this can be controlled when
+ computing instance transformation matrices using the
+ UsdGeomPointInstancer::PrototypeXformInclusion enumeration.
+
+ To understand the computation of the instance transform, in order to put
+ an instance of a PointInstancer into the space of the PointInstancer's
+ parent prim we do the following:
+
+ 1. Apply (most locally) the authored local to parent transformation for
+ prototypes[protoIndices[i]]
+ 2. If *scales* is authored, next apply the scaling matrix from *scales[i]*
+ 3. If *orientations* is authored: **if *angularVelocities* is authored**,
+ first multiply *orientations[i]* by the unit quaternion derived by scaling
+ *angularVelocities[i]* by the \"time differential\"
+ from the left-bracketing timeSample for *orientation* to the requested
+ evaluation time *t*, storing the result in *R*, **else** assign *R*
+ directly from *orientations[i]*. Apply the rotation matrix derived
+ from *R*.
+ 4. Apply the translation derived from *positions[i]*. If *velocities* is
+ authored, apply the translation deriving from *velocities[i]* scaled by
+ the time differential from the left-bracketing timeSample for *positions*
+ to the requested evaluation time *t*.
+ 5. Least locally, apply the transformation authored on the PointInstancer
+ prim itself (or the UsdGeomImageable::ComputeLocalToWorldTransform() of the
+ PointInstancer to put the instance directly into world space)
+
+    If neither *velocities* nor *angularVelocities* are authored, we fall back
+    to standard position and orientation computation logic (using linear
+    interpolation between timeSamples).
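+
+    As a minimal, illustrative Python sketch (assuming the pxr Python
+    bindings; the prim paths and values here are hypothetical), the
+    high-level API performs the per-instance composition described above:
+    \\code
+    from pxr import Usd, UsdGeom
+
+    stage = Usd.Stage.CreateInMemory()
+    pi = UsdGeom.PointInstancer.Define(stage, '/Instancer')
+    proto = UsdGeom.Cube.Define(stage, '/Instancer/Protos/Cube')
+    pi.CreatePrototypesRel().AddTarget(proto.GetPath())
+    pi.CreateProtoIndicesAttr([0, 0, 0])
+    pi.CreatePositionsAttr([(0, 0, 0), (5, 0, 0), (10, 0, 0)])
+
+    # Instancer-relative transforms (steps 1-4 above); compose with the
+    # instancer's ComputeLocalToWorldTransform() for world space (step 5).
+    xforms = pi.ComputeInstanceTransformsAtTime(
+        Usd.TimeCode.Default(), Usd.TimeCode.Default())
+    \\endcode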
+
+ \\anchor UsdGeom_PITimeScaling
+ Scaling Velocities for Interpolation
+
+ When computing time-differentials by which to apply velocity or
+ angularVelocity to positions or orientations, we must scale by
+ ( 1.0 / UsdStage::GetTimeCodesPerSecond() ), because velocities are recorded
+ in units/second, while we are interpolating in UsdTimeCode ordinates.
+
+    We provide both high- and low-level APIs for dealing with the
+    transformation as a matrix; both will compute the instance matrices using
+    multiple threads. The low-level API allows the client to cache unvarying
+    inputs so that they need not be read repeatedly when computing over
+    time.
+
+ See also .
+
+ Primvars on PointInstancer
+
+ \"Primvars\" authored on a PointInstancer prim should
+ always be applied to each instance with constant interpolation at
+ the root of the instance. When you are authoring primvars on a
+ PointInstancer, think about it as if you were authoring them on a
+ point-cloud (e.g. a UsdGeomPoints gprim). The same
+ interpolation rules for points apply here, substituting
+ \"instance\" for \"point\".
+
+ In other words, the (constant) value extracted for each instance
+ from the authored primvar value depends on the authored interpolation
+ and elementSize of the primvar, as follows:
+ - constant or uniform : the entire authored value of the
+ primvar should be applied exactly to each instance.
+ - varying, vertex, or faceVarying: the first
+ elementSize elements of the authored primvar array should be assigned to
+ instance zero, the second elementSize elements should be assigned to
+ instance one, and so forth.
+
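+    Continuing the illustrative Python sketch above (pxr bindings assumed;
+    'pi' is the hypothetical PointInstancer with three instances), a
+    per-instance primvar can be authored with vertex interpolation:
+    \\code
+    from pxr import Sdf, UsdGeom
+
+    pvAPI = UsdGeom.PrimvarsAPI(pi.GetPrim())
+    color = pvAPI.CreatePrimvar('displayColor',
+                                Sdf.ValueTypeNames.Color3fArray,
+                                UsdGeom.Tokens.vertex)
+    color.Set([(1, 0, 0), (0, 1, 0), (0, 0, 1)])   # instances 0, 1, 2
+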
+
+ Masking Instances: \"Deactivating\" and Invising
+
+ Often a PointInstancer is created \"upstream\" in a graphics pipeline, and
+ the needs of \"downstream\" clients necessitate eliminating some of the
+ instances from further consideration. Accomplishing this pruning by
+ re-authoring all of the per-instance attributes is not very attractive,
+ since it may mean destructively editing a large quantity of data. We
+ therefore provide means of \"masking\" instances by ID, such that the
+ instance data is unmolested, but per-instance transform and primvar data
+ can be retrieved with the no-longer-desired instances eliminated from the
+ (smaller) arrays. PointInstancer allows two independent means of masking
+ instances by ID, each with different features that meet the needs of
+    various clients in a pipeline. Both pruning features' lists of IDs are
+ combined to produce the mask returned by ComputeMaskAtTime().
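+
+    Continuing the illustrative Python sketch above (since no ids attribute
+    was authored there, the integers below refer to element positions in the
+    protoIndices array):
+    \\code
+    pi.DeactivateId(1)                        # list-edited, unvarying
+    pi.InvisId(2, Usd.TimeCode(101))          # animatable, per-timeSample
+    mask = pi.ComputeMaskAtTime(Usd.TimeCode(101))
+    kept = [i for i, keep in enumerate(mask) if keep]
+    \\endcode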
+
+ \\note If a PointInstancer has no authored ids attribute, the masking
+ features will still be available, with the integers specifying element
+ position in the protoIndices array rather than ID.
+
+ \\subsection UsdGeomPointInstancer_inactiveIds InactiveIds: List-edited, Unvarying Masking
+
+ The first masking feature encodes a list of IDs in a list-editable metadatum
+    called inactiveIds, which, although it does not affect stage population
+    the way \"prim activation\" does, shares with that feature the property
+    that its application is uniform over all time.
+ Because it is list-editable, we can sparsely add and remove instances
+ from it in many layers.
+
+ This sparse application pattern makes inactiveIds a good choice when
+ further downstream clients may need to reverse masking decisions made
+ upstream, in a manner that is robust to many kinds of future changes to
+ the upstream data.
+
+ See ActivateId(), ActivateIds(), DeactivateId(), DeactivateIds(),
+ ActivateAllIds()
+
+ \\subsection UsdGeomPointInstancer_invisibleIds invisibleIds: Animatable Masking
+
+ The second masking feature encodes a list of IDs in a time-varying
+    Int64Array-valued UsdAttribute called invisibleIds, since it shares
+ with \"Imageable visibility\"
+ the ability to animate object visibility.
+
+ Unlike inactiveIds, overriding a set of opinions for invisibleIds
+    is not at all straightforward, because one will, in general, need to
+ reauthor (in the overriding layer) **all** timeSamples for the attribute
+ just to change one Id's visibility state, so it cannot be authored
+ sparsely. But it can be a very useful tool for situations like encoding
+ pre-computed camera-frustum culling of geometry when either or both of
+ the instances or the camera is animated.
+
+ See VisId(), VisIds(), InvisId(), InvisIds(), VisAllIds()
+
+ Processing and Not Processing Prototypes
+
+ Any prim in the scenegraph can be targeted as a prototype by the
+ prototypes relationship. We do not, however, provide a specific
+ mechanism for identifying prototypes as geometry that should not be drawn
+ (or processed) in their own, local spaces in the scenegraph. We
+ encourage organizing all prototypes as children of the PointInstancer
+ prim that consumes them, and pruning \"raw\" processing and drawing
+ traversals when they encounter a PointInstancer prim; this is what the
+ UsdGeomBBoxCache and UsdImaging engines do.
+
+ There is a pattern one can deploy for organizing the prototypes
+ such that they will automatically be skipped by basic UsdPrim::GetChildren()
+ or UsdPrimRange traversals. Usd prims each have a
+ \"specifier\" of \"def\", \"over\", or \"class\". The
+ default traversals skip over prims that are \"pure overs\" or classes. So
+ to protect prototypes from all generic traversals and processing, place
+ them under a prim that is just an \"over\". For example,
+ \\code
+ 01 def PointInstancer \"Crowd_Mid\"
+ 02 {
+ 03 rel prototypes = [ , ]
+ 04
+ 05 over \"Prototypes\"
+ 06 {
+ 07 def \"MaleThin_Business\" (
+ 08 references = [@MaleGroupA/usd/MaleGroupA.usd@]
+ 09 variants = {
+ 10 string modelingVariant = \"Thin\"
+ 11 string costumeVariant = \"BusinessAttire\"
+ 12 }
+ 13 )
+ 14 { ... }
+ 15
+ 16 def \"MaleThin_Casual\"
+ 17 ...
+ 18 }
+ 19 }
+ \\endcode
+ """
+)
+{
+ vector3f[] accelerations (
+ doc = """If authored, per-instance 'accelerations' will be used with
+ velocities to compute positions between samples for the 'positions'
+ attribute rather than interpolating between neighboring 'positions'
+ samples. Acceleration is measured in position units per second-squared.
+ To convert to position units per squared UsdTimeCode, divide by the
+ square of UsdStage::GetTimeCodesPerSecond()."""
+ )
+ vector3f[] angularVelocities (
+ doc = """If authored, per-instance angular velocity vector to be used for
+        interpolating orientations. Angular velocities should be considered
+ mandatory if both protoIndices and orientations are animated.
+ Angular velocity is measured in degrees per second. To convert
+ to degrees per UsdTimeCode, divide by
+ UsdStage::GetTimeCodesPerSecond().
+
+ See also ."""
+ )
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ int64[] ids (
+ doc = """Ids are optional; if authored, the ids array should be the same
+ length as the protoIndices array, specifying (at each timeSample if
+ instance identities are changing) the id of each instance. The
+ type is signed intentionally, so that clients can encode some
+ binary state on Id'd instances without adding a separate primvar.
+ See also \\ref UsdGeomPointInstancer_varyingTopo"""
+ )
+ int64[] invisibleIds = [] (
+        doc = """A list of ids to make invisible at the evaluation time.
+ See ."""
+ )
+ quath[] orientations (
+ doc = """If authored, per-instance orientation of each instance about its
+ prototype's origin, represented as a unit length quaternion, which
+ allows us to encode it with sufficient precision in a compact GfQuath.
+
+        It is the client's responsibility to ensure that authored quaternions are
+ unit length; the convenience API below for authoring orientations from
+ rotation matrices will ensure that quaternions are unit length, though
+ it will not make any attempt to select the \"better (for interpolation
+ with respect to neighboring samples)\" of the two possible quaternions
+ that encode the rotation.
+
+ See also ."""
+ )
+ quatf[] orientationsf (
+ doc = """If authored, per-instance orientation of each instance about its
+ prototype's origin, represented as a unit length quaternion, encoded
+ as a GfQuatf to support higher precision computations.
+
+        It is the client's responsibility to ensure that authored quaternions are
+ unit length; the convenience API below for authoring orientations from
+ rotation matrices will ensure that quaternions are unit length, though
+ it will not make any attempt to select the \"better (for interpolation
+ with respect to neighboring samples)\" of the two possible quaternions
+ that encode the rotation. Note that if the earliest time sample (or
+        default value if there are no time samples) of orientationsf is not empty,
+        orientationsf will be preferred over orientations if both are authored.
+
+ See also ."""
+ )
+ point3f[] positions (
+ doc = """Required property. Per-instance position. See also
+ ."""
+ )
+ int[] protoIndices (
+ doc = """Required property. Per-instance index into
+ prototypes relationship that identifies what geometry should be
+ drawn for each instance. Topology attribute - can be animated,
+ but at a potential performance impact for streaming."""
+ )
+ rel prototypes (
+ doc = """Required property. Orders and targets the prototype root
+ prims, which can be located anywhere in the scenegraph that is convenient,
+ although we promote organizing prototypes as children of the
+ PointInstancer. The position of a prototype in this relationship defines
+ the value an instance would specify in the protoIndices attribute to
+ instance that prototype. Since relationships are uniform, this property
+ cannot be animated."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ float3[] scales (
+ doc = """If authored, per-instance scale to be applied to
+ each instance, before any rotation is applied.
+
+ See also ."""
+ )
+ vector3f[] velocities (
+ doc = """If provided, per-instance 'velocities' will be used to
+ compute positions between samples for the 'positions' attribute,
+ rather than interpolating between neighboring 'positions' samples.
+ Velocities should be considered mandatory if both protoIndices
+ and positions are animated. Velocity is measured in position
+ units per second, as per most simulation software. To convert to
+ position units per UsdTimeCode, divide by
+ UsdStage::GetTimeCodesPerSecond().
+
+ See also
+ ."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class Camera "Camera" (
+ doc = """Transformable camera.
+
+ Describes optical properties of a camera via a common set of attributes
+ that provide control over the camera's frustum as well as its depth of
+ field. For stereo, the left and right camera are individual prims tagged
+    through the stereoRole attribute.
+
+    There is a corresponding class GfCamera, which can hold the state of a
+    camera (at a particular time); API is provided to convert between a USD
+    camera prim and a GfCamera.
+
+ To obtain the camera's location in world space, call the following on a
+ UsdGeomCamera 'camera':
+ \\code
+ GfMatrix4d camXform = camera.ComputeLocalToWorldTransform(time);
+ \\endcode
+ \\note
+ Cameras in USD are always \"Y up\", regardless of the stage's orientation
+ (i.e. UsdGeomGetStageUpAxis()). This means that the inverse of
+ 'camXform' (the VIEW half of the MODELVIEW transform in OpenGL parlance)
+ will transform the world such that the camera is at the origin, looking
+ down the -Z axis, with +Y as the up axis, and +X pointing to the right.
+ This describes a __right handed coordinate system__.
+
+ Units of Measure for Camera Properties
+
+ Despite the familiarity of millimeters for specifying some physical
+ camera properties, UsdGeomCamera opts for greater consistency with all
+ other UsdGeom schemas, which measure geometric properties in scene units,
+ as determined by UsdGeomGetStageMetersPerUnit(). We do make a
+ concession, however, in that lens and filmback properties are measured in
+ __tenths of a scene unit__ rather than \"raw\" scene units. This means
+ that with the fallback value of .01 for _metersPerUnit_ - i.e. scene unit
+    of centimeters - these \"tenth of scene unit\" properties are
+ effectively millimeters.
+
+ \\note If one adds a Camera prim to a UsdStage whose scene unit is not
+ centimeters, the fallback values for filmback properties will be
+ incorrect (or at the least, unexpected) in an absolute sense; however,
+ proper imaging through a \"default camera\" with focusing disabled depends
+ only on ratios of the other properties, so the camera is still usable.
+ However, it follows that if even one property is authored in the correct
+ scene units, then they all must be.
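+
+    As an illustrative Python sketch of the unit handling (pxr bindings
+    assumed; the values shown are the schema fallbacks):
+    \\code
+    from pxr import Usd, UsdGeom
+
+    stage = Usd.Stage.CreateInMemory()
+    cam = UsdGeom.Camera.Define(stage, '/Cam')
+
+    mpu = UsdGeom.GetStageMetersPerUnit(stage)   # 0.01 (centimeters) by default
+    tenths = cam.GetFocalLengthAttr().Get()      # fallback value: 50.0
+    focalMeters = tenths * 0.1 * mpu             # 0.05 m
+    focalMillimeters = focalMeters * 1000.0      # 50 mm
+    \\endcode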
+
+
+ \\sa \\ref UsdGeom_LinAlgBasics
+ """
+)
+{
+ float4[] clippingPlanes = [] (
+ doc = """Additional, arbitrarily oriented clipping planes.
+ A vector (a,b,c,d) encodes a clipping plane that cuts off
+ (x,y,z) with a * x + b * y + c * z + d * 1 < 0 where (x,y,z)
+ are the coordinates in the camera's space."""
+ )
+ float2 clippingRange = (1, 1000000) (
+ doc = """Near and far clipping distances in scene units; see
+ ."""
+ )
+ float exposure = 0 (
+ doc = """Exposure adjustment, as a log base-2 value. The default
+ of 0.0 has no effect. A value of 1.0 will double the
+ image-plane intensities in a rendered image; a value of
+ -1.0 will halve them."""
+ )
+ float focalLength = 50 (
+ doc = """Perspective focal length in tenths of a scene unit; see
+ ."""
+ )
+ float focusDistance = 0 (
+ doc = """Distance from the camera to the focus plane in scene units; see
+ ."""
+ )
+ float fStop = 0 (
+ doc = "Lens aperture. Defaults to 0.0, which turns off focusing."
+ )
+ float horizontalAperture = 20.955 (
+ doc = """Horizontal aperture in tenths of a scene unit; see
+ . Default is the equivalent of
+ the standard 35mm spherical projector aperture."""
+ )
+ float horizontalApertureOffset = 0 (
+ doc = """Horizontal aperture offset in the same units as
+ horizontalAperture. Defaults to 0."""
+ )
+ token projection = "perspective" (
+ allowedTokens = ["perspective", "orthographic"]
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ double shutter:close = 0 (
+        doc = """Frame relative shutter close time; analogous comments from
+ shutter:open apply. A value greater or equal to shutter:open
+ should be authored, otherwise there is no exposure and a
+ renderer should produce a black image."""
+ )
+ double shutter:open = 0 (
+ doc = """Frame relative shutter open time in UsdTimeCode units (negative
+ value indicates that the shutter opens before the current
+ frame time). Used for motion blur."""
+ )
+ uniform token stereoRole = "mono" (
+ allowedTokens = ["mono", "left", "right"]
+ doc = """If different from mono, the camera is intended to be the left
+ or right camera of a stereo setup."""
+ )
+ float verticalAperture = 15.2908 (
+ doc = """Vertical aperture in tenths of a scene unit; see
+ . Default is the equivalent of
+ the standard 35mm spherical projector aperture."""
+ )
+ float verticalApertureOffset = 0 (
+ doc = """Vertical aperture offset in the same units as
+ verticalAperture. Defaults to 0."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class "GeomModelAPI" (
+ doc = """UsdGeomModelAPI extends the generic UsdModelAPI schema with
+ geometry specific concepts such as cached extents for the entire model,
+ constraint targets, and geometry-inspired extensions to the payload
+ lofting process.
+
+ As described in GetExtentsHint() below, it is useful to cache extents
+ at the model level. UsdGeomModelAPI provides schema for computing and
+ storing these cached extents, which can be consumed by UsdGeomBBoxCache to
+ provide fast access to precomputed extents that will be used as the model's
+ bounds ( see UsdGeomBBoxCache::UsdGeomBBoxCache() ).
+
+ Draw Modes
+
+ Draw modes provide optional alternate imaging behavior for USD subtrees with
+ kind model. model:drawMode (which is inheritable) and
+ model:applyDrawMode (which is not) are resolved into a decision to stop
+ traversing the scene graph at a certain point, and replace a USD subtree
+ with proxy geometry.
+
+ The value of model:drawMode determines the type of proxy geometry:
+ - origin - Draw the model-space basis vectors of the replaced prim.
+ - bounds - Draw the model-space bounding box of the replaced prim.
+ - cards - Draw textured quads as a placeholder for the replaced prim.
+ - default - An explicit opinion to draw the USD subtree as normal.
+ - inherited - Defer to the parent opinion.
+
+ model:drawMode falls back to _inherited_ so that a whole scene,
+ a large group, or all prototypes of a model hierarchy PointInstancer can
+ be assigned a draw mode with a single attribute edit. If no draw mode is
+ explicitly set in a hierarchy, the resolved value is _default_.
+
+ model:applyDrawMode is meant to be written when an asset is authored,
+ and provides flexibility for different asset types. For example,
+ a character assembly (composed of character, clothes, etc) might have
+ model:applyDrawMode set at the top of the subtree so the whole group
+ can be drawn as a single card object. An effects subtree might have
+ model:applyDrawMode set at a lower level so each particle
+ group draws individually.
+
+ Models of kind component are automatically treated as if
+ model:applyDrawMode were true if model:applyDrawMode is not
+    authored on the component prim. A component prim will be drawn with a
+ simplified representation when the prim has kind component,
+ model:applyDrawMode is not authored (or authored to be true), and the
+ resolved (i.e. inherited down namespace) value for model:drawMode is
+ not _default_. If you don't want component prims to use the resolved
+ non-default drawMode, you must apply the UsdGeomModelAPI schema on the prim
+ and explicitly set model:applyDrawMode to false.
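+
+    As an illustrative Python sketch (pxr bindings assumed; the prim path and
+    kind are hypothetical):
+    \\code
+    from pxr import Kind, Usd, UsdGeom
+
+    stage = Usd.Stage.CreateInMemory()
+    asset = UsdGeom.Xform.Define(stage, '/Asset')
+    Usd.ModelAPI(asset.GetPrim()).SetKind(Kind.Tokens.component)
+
+    api = UsdGeom.ModelAPI.Apply(asset.GetPrim())
+    api.CreateModelDrawModeAttr(UsdGeom.Tokens.bounds)
+    api.CreateModelApplyDrawModeAttr(True)
+    \\endcode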
+
+ Cards Geometry
+
+ The specific geometry used in cards mode is controlled by the
+ model:cardGeometry attribute:
+ - cross - Generate a quad normal to each basis direction and negative.
+ Locate each quad so that it bisects the model extents.
+ - box - Generate a quad normal to each basis direction and negative.
+ Locate each quad on a face of the model extents, facing out.
+ - fromTexture - Generate a quad for each supplied texture from
+ attributes stored in that texture's metadata.
+
+ For cross and box mode, the extents are calculated for purposes
+ default, proxy, and render, at their earliest authored time.
+ If the model has no textures, all six card faces are rendered using
+ model:drawModeColor. If one or more textures are present, only axes
+ with one or more textures assigned are drawn. For each axis, if both
+ textures (positive and negative) are specified, they'll be used on the
+ corresponding card faces; if only one texture is specified, it will be
+ mapped to the opposite card face after being flipped on the texture's
+ s-axis. Any card faces with invalid asset paths will be drawn with
+ model:drawModeColor.
+
+ Both model:cardGeometry and model:drawModeColor should be
+ authored on the prim where the draw mode takes effect, since these
+ attributes are not inherited.
+
+ For fromTexture mode, only card faces with valid textures assigned
+ are drawn. The geometry is generated by pulling the worldtoscreen
+ attribute out of texture metadata. This is expected to be a 4x4 matrix
+ mapping the model-space position of the card quad to the clip-space quad
+ with corners (-1,-1,0) and (1,1,0). The card vertices are generated by
+ transforming the clip-space corners by the inverse of worldtoscreen.
+ Textures are mapped so that (s) and (t) map to (+x) and (+y) in clip space.
+ If the metadata cannot be read in the right format, or the matrix can't
+ be inverted, the card face is not drawn.
+
+ All card faces are drawn and textured as single-sided.
+
+ \\todo CreatePayload() """
+)
+{
+ uniform bool model:applyDrawMode = 0 (
+ doc = """If true, and the resolved value of model:drawMode is
+ non-default, apply an alternate imaging mode to this prim. See
+ \\ref UsdGeomModelAPI_drawMode."""
+ )
+ uniform token model:cardGeometry = "cross" (
+ allowedTokens = ["cross", "box", "fromTexture"]
+ doc = """The geometry to generate for imaging prims inserted for \\em
+ cards imaging mode. See for
+ geometry descriptions."""
+ )
+ asset model:cardTextureXNeg (
+ doc = """In cards imaging mode, the texture applied to the X- quad.
+ The texture axes (s,t) are mapped to model-space axes (y, -z)."""
+ )
+ asset model:cardTextureXPos (
+ doc = """In cards imaging mode, the texture applied to the X+ quad.
+ The texture axes (s,t) are mapped to model-space axes (-y, -z)."""
+ )
+ asset model:cardTextureYNeg (
+ doc = """In cards imaging mode, the texture applied to the Y- quad.
+ The texture axes (s,t) are mapped to model-space axes (-x, -z)."""
+ )
+ asset model:cardTextureYPos (
+ doc = """In cards imaging mode, the texture applied to the Y+ quad.
+ The texture axes (s,t) are mapped to model-space axes (x, -z)."""
+ )
+ asset model:cardTextureZNeg (
+ doc = """In cards imaging mode, the texture applied to the Z- quad.
+ The texture axes (s,t) are mapped to model-space axes (-x, -y)."""
+ )
+ asset model:cardTextureZPos (
+ doc = """In cards imaging mode, the texture applied to the Z+ quad.
+ The texture axes (s,t) are mapped to model-space axes (x, -y)."""
+ )
+ uniform token model:drawMode = "inherited" (
+ allowedTokens = ["origin", "bounds", "cards", "default", "inherited"]
+ doc = """Alternate imaging mode; applied to this prim or child prims
+ where model:applyDrawMode is true, or where the prim
+ has kind component and model:applyDrawMode is not
+ authored. See \\ref UsdGeomModelAPI_drawMode
+ for mode descriptions."""
+ )
+ uniform float3 model:drawModeColor = (0.18, 0.18, 0.18) (
+ doc = """The base color of imaging prims inserted for alternate
+ imaging modes. For origin and bounds modes, this
+ controls line color; for cards mode, this controls the
+ fallback quad color."""
+ )
+}
+
+class "MotionAPI" (
+ doc = '''UsdGeomMotionAPI encodes data that can live on any prim that
+ may affect computations involving:
+ - computed motion for motion blur
+ - sampling for motion blur
+
+ The "motion:blurScale" attribute allows
+ artists to scale the __amount__ of motion blur to be rendered for parts
+ of the scene without changing the recorded animation. See
+ for use and implementation details.
+
+ '''
+)
+{
+ float motion:blurScale = 1 (
+ doc = """BlurScale is an __inherited__ float attribute that stipulates
+ the rendered motion blur (as typically specified via UsdGeomCamera's
+ _shutter:open_ and _shutter:close_ properties) should be scaled for
+ __all objects__ at and beneath the prim in namespace on which the
+ _motion:blurScale_ value is specified.
+
+ Without changing any other data in the scene, _blurScale_ allows artists to
+ \"dial in\" the amount of blur on a per-object basis. A _blurScale_
+ value of zero removes all blur, a value of 0.5 reduces blur by half,
+ and a value of 2.0 doubles the blur. The legal range for _blurScale_
+ is [0, inf), although very high values may result in extremely expensive
+ renders, and may exceed the capabilities of some renderers.
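+
+        A minimal illustrative Python sketch (pxr bindings and the generated
+        CreateMotionBlurScaleAttr API assumed; the prim path and scale value
+        are hypothetical):
+        \\code
+        from pxr import Usd, UsdGeom
+
+        stage = Usd.Stage.CreateInMemory()
+        fx = UsdGeom.Xform.Define(stage, '/Fx').GetPrim()
+        motion = UsdGeom.MotionAPI.Apply(fx)
+        motion.CreateMotionBlurScaleAttr(0.5)   # halve blur for /Fx and below
+        \\endcode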
+
+ Although renderers are free to implement this feature however they see
+ fit, see for our guidance on implementing
+ the feature universally and efficiently.
+
+ \\sa ComputeMotionBlurScale()
+ """
+ )
+ int motion:nonlinearSampleCount = 3 (
+ doc = """Determines the number of position or transformation samples
+ created when motion is described by attributes contributing non-linear
+ terms.
+
+ To give an example, imagine an application (such as a
+ renderer) consuming 'points' and the USD document also
+ contains 'accelerations' for the same prim. Unless the
+ application can consume these 'accelerations' itself, an
+ intermediate layer has to compute samples within the sampling
+ interval for the point positions based on the value of
+ 'points', 'velocities' and 'accelerations'. The number of these
+ samples is given by 'nonlinearSampleCount'. The samples are
+ equally spaced within the sampling interval.
+
+ Another example involves the PointInstancer where
+ 'nonlinearSampleCount' is relevant when 'angularVelocities'
+ or 'accelerations' are authored.
+
+ 'nonlinearSampleCount' is an **inherited** attribute, also
+ see ComputeNonlinearSampleCount()"""
+ )
+ float motion:velocityScale = 1 (
+ doc = """\\deprecated
+
+ VelocityScale is an **inherited** float attribute that
+ velocity-based schemas (e.g. PointBased, PointInstancer) can consume
+ to compute interpolated positions and orientations by applying
+ velocity and angularVelocity, which is required for interpolating
+ between samples when topology is varying over time. Although these
+ quantities are generally physically computed by a simulator, sometimes
+ we require more or less motion-blur to achieve the desired look.
+ VelocityScale allows artists to dial-in, as a post-sim correction,
+ a scale factor to be applied to the velocity prior to computing
+ interpolated positions from it."""
+ )
+}
+
+class "XformCommonAPI" (
+ doc = """This class provides API for authoring and retrieving a standard set
+ of component transformations which include a scale, a rotation, a
+ scale-rotate pivot and a translation. The goal of the API is to enhance
+ component-wise interchange. It achieves this by limiting the set of allowed
+ basic ops and by specifying the order in which they are applied. In addition
+ to the basic set of ops, the 'resetXformStack' bit can also be set to
+ indicate whether the underlying xformable resets the parent transformation
+    (i.e. does not inherit its parent's transformation).
+
+ \\sa UsdGeomXformCommonAPI::GetResetXformStack()
+ \\sa UsdGeomXformCommonAPI::SetResetXformStack()
+
+ The operator-bool for the class will inform you whether an existing
+ xformable is compatible with this API.
+
+ The scale-rotate pivot is represented by a pair of (translate,
+ inverse-translate) xformOps around the scale and rotate operations.
+ The rotation operation can be any of the six allowed Euler angle sets.
+ \\sa UsdGeomXformOp::Type.
+
+ The xformOpOrder of an xformable that has all of the supported basic ops
+ is as follows:
+ [\"xformOp:translate\", \"xformOp:translate:pivot\", \"xformOp:rotateXYZ\",
+ \"xformOp:scale\", \"!invert!xformOp:translate:pivot\"].
+
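+    As an illustrative Python sketch (pxr bindings assumed; the prim path and
+    values are hypothetical), sparse authoring through this API looks like:
+    \\code
+    from pxr import Gf, Usd, UsdGeom
+
+    stage = Usd.Stage.CreateInMemory()
+    xf = UsdGeom.Xform.Define(stage, '/Group')
+    api = UsdGeom.XformCommonAPI(xf.GetPrim())
+    api.SetTranslate(Gf.Vec3d(0, 10, 0))
+    api.SetRotate(Gf.Vec3f(0, 90, 0))    # default rotation order is XYZ
+    api.SetScale(Gf.Vec3f(2, 2, 2))
+    \\endcode
+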
+ It is worth noting that all of the ops are optional. For example, an
+ xformable may have only a translate or a rotate. It would still be
+ considered as compatible with this API. Individual SetTranslate(),
+ SetRotate(), SetScale() and SetPivot() methods are provided by this API
+ to allow such sparse authoring."""
+)
+{
+}
+
+class HermiteCurves "HermiteCurves" (
+ doc = """This schema specifies a cubic hermite interpolated curve batch as
+ sometimes used for defining guides for animation. While hermite curves can
+ be useful because they interpolate through their control points, they are
+ not well supported by high-end renderers for imaging. Therefore, while we
+ include this schema for interchange, we strongly recommend the use of
+ UsdGeomBasisCurves as the representation of curves intended to be rendered
+    (e.g. hair or grass). Hermite curves can be converted to a Bezier
+ representation (though not from Bezier back to Hermite in general).
+
+ Point Interpolation
+
+ The initial cubic curve segment is defined by the first two points and
+ first two tangents. Additional segments are defined by additional
+ point / tangent pairs. The number of segments for each non-batched hermite
+ curve would be len(curve.points) - 1. The total number of segments
+ for the batched UsdGeomHermiteCurves representation is
+ len(points) - len(curveVertexCounts).
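+
+    As an illustrative Python sketch of the batched layout (pxr bindings
+    assumed; the points and tangents are hypothetical):
+    \\code
+    from pxr import Usd, UsdGeom
+
+    stage = Usd.Stage.CreateInMemory()
+    hc = UsdGeom.HermiteCurves.Define(stage, '/Guides')
+    hc.CreateCurveVertexCountsAttr([2, 3])                     # two curves
+    hc.CreatePointsAttr([(0, 0, 0), (1, 0, 0),                 # curve 0
+                         (0, 1, 0), (1, 1, 0), (2, 1, 0)])     # curve 1
+    hc.CreateTangentsAttr([(1, 0, 0)] * 5)                     # one per point
+    # total segments = len(points) - len(curveVertexCounts) = 5 - 2 = 3
+    \\endcode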
+
+ Primvar, Width, and Normal Interpolation
+
+ Primvar interpolation is not well specified for this type as it is not
+ intended as a rendering representation. We suggest that per point
+ primvars would be linearly interpolated across each segment and should
+ be tagged as 'varying'.
+
+ It is not immediately clear how to specify cubic or 'vertex' interpolation
+ for this type, as we lack a specification for primvar tangents. This
+ also means that width and normal interpolation should be restricted to
+ varying (linear), uniform (per curve element), or constant (per prim).
+ """
+)
+{
+ vector3f[] accelerations (
+ doc = """If provided, 'accelerations' should be used with
+ velocities to compute positions between samples for the 'points'
+ attribute rather than interpolating between neighboring 'points'
+ samples. Acceleration is measured in position units per second-squared.
+ To convert to position units per squared UsdTimeCode, divide by the
+ square of UsdStage::GetTimeCodesPerSecond()."""
+ )
+ int[] curveVertexCounts (
+ doc = """Curves-derived primitives can represent multiple distinct,
+ potentially disconnected curves. The length of 'curveVertexCounts'
+ gives the number of such curves, and each element describes the
+ number of vertices in the corresponding curve"""
+ )
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ normal3f[] normals (
+ doc = """Provide an object-space orientation for individual points,
+ which, depending on subclass, may define a surface, curve, or free
+ points. Note that 'normals' should not be authored on any Mesh that
+ is subdivided, since the subdivision algorithm will define its own
+ normals. 'normals' is not a generic primvar, but the number of elements
+ in this attribute will be determined by its 'interpolation'. See
+ . If 'normals' and 'primvars:normals'
+ are both specified, the latter has precedence."""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+ Please see for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ point3f[] points (
+ doc = """The primary geometry attribute for all PointBased
+ primitives, describes points in (local) space."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ vector3f[] tangents = [] (
+ doc = """Defines the outgoing trajectory tangent for each point.
+ Tangents should be the same size as the points attribute."""
+ )
+ vector3f[] velocities (
+        doc = """If provided, 'velocities' should be used by renderers to
+        compute positions between samples for the 'points' attribute, rather
+ than interpolating between neighboring 'points' samples. This is the
+ only reasonable means of computing motion blur for topologically
+ varying PointBased primitives. It follows that the length of each
+ 'velocities' sample must match the length of the corresponding
+ 'points' sample. Velocity is measured in position units per second,
+ as per most simulation software. To convert to position units per
+ UsdTimeCode, divide by UsdStage::GetTimeCodesPerSecond().
+
+ See also ."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ float[] widths (
+ doc = """Provides width specification for the curves, whose application
+ will depend on whether the curve is oriented (normals are defined for
+ it), in which case widths are \"ribbon width\", or unoriented, in which
+ case widths are cylinder width. 'widths' is not a generic Primvar,
+ but the number of elements in this attribute will be determined by
+ its 'interpolation'. See . If 'widths'
+ and 'primvars:widths' are both specified, the latter has precedence."""
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+# Portions of this file auto-generated by usdGenSchema.
+# Edits will survive regeneration except for comments and
+# changes to types with autoGenerated=true.
+{
+ "Plugins": [
+ {
+ "Info": {
+ "SdfMetadata": {
+ "constraintTargetIdentifier": {
+ "appliesTo": [
+ "attributes"
+ ],
+ "default": "",
+                    "documentation": "Unique identifier within a model's namespace for a matrix-valued attribute representing a constraint target",
+ "type": "token"
+ },
+ "elementSize": {
+ "appliesTo": [
+ "attributes"
+ ],
+ "default": 1,
+ "displayGroup": "Primvars",
+ "documentation": "The number of values in a primvar's value array that must be aggregated for each element on the primitive.",
+ "type": "int"
+ },
+ "inactiveIds": {
+ "appliesTo": [
+ "prims"
+ ],
+ "type": "int64listop"
+ },
+ "interpolation": {
+ "appliesTo": [
+ "attributes"
+ ],
+ "default": "constant",
+ "displayGroup": "Primvars",
+ "documentation": "How a primvar interpolates across a primitive; equivalent to RenderMan's 'class specifier'",
+ "type": "token"
+ },
+ "metersPerUnit": {
+ "appliesTo": [
+ "layers"
+ ],
+ "default": 0.01,
+ "displayGroup": "Stage",
+ "type": "double"
+ },
+ "unauthoredValuesIndex": {
+ "appliesTo": [
+ "attributes"
+ ],
+ "default": -1,
+ "displayGroup": "Primvars",
+ "documentation": "The index that represents unauthored values in the indices array of an indexed primvar.",
+ "type": "int"
+ },
+ "upAxis": {
+ "appliesTo": [
+ "layers"
+ ],
+ "default": "Y",
+ "displayGroup": "Stage",
+ "type": "token"
+ }
+ },
+ "Types": {
+ "UsdGeomBasisCurves": {
+ "alias": {
+ "UsdSchemaBase": "BasisCurves"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomCurves"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomBoundable": {
+ "alias": {
+ "UsdSchemaBase": "Boundable"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomXformable"
+ ],
+ "schemaKind": "abstractTyped"
+ },
+ "UsdGeomCamera": {
+ "alias": {
+ "UsdSchemaBase": "Camera"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomXformable"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomCapsule": {
+ "alias": {
+ "UsdSchemaBase": "Capsule"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomGprim"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomCapsule_1": {
+ "alias": {
+ "UsdSchemaBase": "Capsule_1"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomGprim"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomCone": {
+ "alias": {
+ "UsdSchemaBase": "Cone"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomGprim"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomCube": {
+ "alias": {
+ "UsdSchemaBase": "Cube"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomGprim"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomCurves": {
+ "alias": {
+ "UsdSchemaBase": "Curves"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomPointBased"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "abstractTyped"
+ },
+ "UsdGeomCylinder": {
+ "alias": {
+ "UsdSchemaBase": "Cylinder"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomGprim"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomCylinder_1": {
+ "alias": {
+ "UsdSchemaBase": "Cylinder_1"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomGprim"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomGprim": {
+ "alias": {
+ "UsdSchemaBase": "Gprim"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomBoundable"
+ ],
+ "schemaKind": "abstractTyped"
+ },
+ "UsdGeomHermiteCurves": {
+ "alias": {
+ "UsdSchemaBase": "HermiteCurves"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomCurves"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomImageable": {
+ "alias": {
+ "UsdSchemaBase": "Imageable"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdTyped"
+ ],
+ "schemaKind": "abstractTyped"
+ },
+ "UsdGeomMesh": {
+ "alias": {
+ "UsdSchemaBase": "Mesh"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomPointBased"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomModelAPI": {
+ "alias": {
+ "UsdSchemaBase": "GeomModelAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "singleApplyAPI"
+ },
+ "UsdGeomMotionAPI": {
+ "alias": {
+ "UsdSchemaBase": "MotionAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "singleApplyAPI"
+ },
+ "UsdGeomNurbsCurves": {
+ "alias": {
+ "UsdSchemaBase": "NurbsCurves"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomCurves"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomNurbsPatch": {
+ "alias": {
+ "UsdSchemaBase": "NurbsPatch"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomPointBased"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomPlane": {
+ "alias": {
+ "UsdSchemaBase": "Plane"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomGprim"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomPointBased": {
+ "alias": {
+ "UsdSchemaBase": "PointBased"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomGprim"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "abstractTyped"
+ },
+ "UsdGeomPointInstancer": {
+ "alias": {
+ "UsdSchemaBase": "PointInstancer"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomBoundable"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomPoints": {
+ "alias": {
+ "UsdSchemaBase": "Points"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomPointBased"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomPrimvarsAPI": {
+ "alias": {
+ "UsdSchemaBase": "PrimvarsAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "nonAppliedAPI"
+ },
+ "UsdGeomScope": {
+ "alias": {
+ "UsdSchemaBase": "Scope"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomImageable"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomSphere": {
+ "alias": {
+ "UsdSchemaBase": "Sphere"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomGprim"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomSubset": {
+ "alias": {
+ "UsdSchemaBase": "GeomSubset"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdTyped"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomTetMesh": {
+ "alias": {
+ "UsdSchemaBase": "TetMesh"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomPointBased"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomVisibilityAPI": {
+ "alias": {
+ "UsdSchemaBase": "VisibilityAPI"
+ },
+ "apiSchemaCanOnlyApplyTo": [
+ "Imageable"
+ ],
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "singleApplyAPI"
+ },
+ "UsdGeomXform": {
+ "alias": {
+ "UsdSchemaBase": "Xform"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomXformable"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdGeomXformCommonAPI": {
+ "alias": {
+ "UsdSchemaBase": "XformCommonAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "nonAppliedAPI"
+ },
+ "UsdGeomXformable": {
+ "alias": {
+ "UsdSchemaBase": "Xformable"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomImageable"
+ ],
+ "schemaKind": "abstractTyped"
+ }
+ }
+ },
+ "LibraryPath": "",
+ "Name": "usdGeom",
+ "ResourcePath": "resources",
+ "Root": "..",
+ "Type": "library"
+ }
+ ]
+}
+#usda 1.0
+(
+ "This file describes the USD Geometric schemata for code generation."
+ subLayers = [
+ @usd/schema.usda@
+ ]
+)
+
+over "GLOBAL" (
+ customData = {
+ string libraryName = "usdGeom"
+ string libraryPath = "pxr/usd/usdGeom"
+ # string libraryPrefix = "UsdGeom"
+ # string tokensPrefix = "UsdGeom"
+ bool useLiteralIdentifier = 0
+ dictionary libraryTokens = {
+ dictionary interpolation = {
+ string doc = """UsdGeomPrimvar - How a Primvar interpolates
+ across a primitive; equivalent to RenderMan's \\ref Usd_InterpolationVals "class specifier" """
+ }
+ dictionary elementSize = {
+ string doc = """UsdGeomPrimvar - The number of values in the
+ value array that must be aggregated for each element on the
+ primitive."""
+ }
+ dictionary unauthoredValuesIndex = {
+ string doc = """UsdGeomPrimvar - The index that represents
+ unauthored values in the indices array of an indexed primvar."""
+ }
+            dictionary constant = {
+ string doc = """Possible value for UsdGeomPrimvar::SetInterpolation.
+ Default value for UsdGeomPrimvar::GetInterpolation. One value
+ remains constant over the entire surface primitive."""
+ }
+ dictionary uniform = {
+ string doc = """Possible value for UsdGeomPrimvar::SetInterpolation.
+ One value remains constant for each uv patch segment of the
+ surface primitive (which is a \\em face for meshes)."""
+ }
+ dictionary varying = {
+ string doc = """Possible value for UsdGeomPrimvar::SetInterpolation.
+ Four values are interpolated over each uv patch segment of the
+ surface. Bilinear interpolation is used for interpolation
+ between the four values."""
+ }
+ dictionary vertex = {
+ string doc = """Possible value for UsdGeomPrimvar::SetInterpolation.
+ Values are interpolated between each vertex in the surface
+ primitive. The basis function of the surface is used for
+ interpolation between vertices."""
+ }
+ dictionary faceVarying = {
+ string doc = """Possible value for UsdGeomPrimvar::SetInterpolation.
+ For polygons and subdivision surfaces, four values are
+ interpolated over each face of the mesh. Bilinear interpolation
+ is used for interpolation between the four values."""
+ }
+ dictionary upAxis = {
+ string doc = """Stage-level metadata that encodes a scene's
+ orientation as a token whose value can be "Y" or "Z"."""
+ }
+ dictionary metersPerUnit = {
+ string doc = """Stage-level metadata that encodes a scene's
+ linear unit of measure as meters per encoded unit."""
+ }
+ dictionary partition = {
+ string doc = """A type of family of GeomSubsets. It implies
+                that every element appears exactly once in only one of the
+ subsets in the family."""
+ }
+ dictionary nonOverlapping = {
+ string doc = """A type of family of GeomSubsets. It implies that
+ the elements in the various subsets belonging to the family are
+ mutually exclusive, i.e., an element that appears in one
+ subset may not belong to any other subset in the family."""
+ }
+ dictionary unrestricted = {
+ string doc = """A type of family of GeomSubsets. It implies that
+ there are no restrictions w.r.t. the membership of elements in
+ the subsets. There could be overlapping members in subsets
+ belonging to the family and the union of all subsets in the
+ family may not contain all the elements."""
+ }
+ dictionary hermite = {
+ string doc = """A deprecated basis token for
+ UsdGeomBasisCurves. Consumers of USD should transition
+ to using the UsdGeomHermiteCurves schema."""
+ }
+ dictionary power = {
+ string doc = """A deprecated basis token for
+ UsdGeomBasisCurves."""
+ }
+ }
+ }
+)
+{
+}
+
+class "Imageable" (
+ inherits =
+ doc = """Base class for all prims that may require rendering or
+ visualization of some sort. The primary attributes of Imageable
+ are \\em visibility and \\em purpose, which each provide instructions for
+ what geometry should be included for processing by rendering and other
+ computations.
+
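+    As an illustrative Python sketch (pxr bindings assumed; the prim path is
+    hypothetical):
+    \\code
+    from pxr import Usd, UsdGeom
+
+    stage = Usd.Stage.CreateInMemory()
+    ball = UsdGeom.Sphere.Define(stage, '/World/Ball')
+    img = UsdGeom.Imageable(ball.GetPrim())
+    img.CreateVisibilityAttr(UsdGeom.Tokens.invisible)
+    print(img.ComputeVisibility())   # 'invisible'
+    print(img.ComputePurpose())      # 'default'
+    \\endcode
+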
+ \\deprecated Imageable also provides API for accessing primvars, which
+ has been moved to the UsdGeomPrimvarsAPI schema, because primvars can now
+ be applied on non-Imageable prim types. This API is planned
+    to be removed; UsdGeomPrimvarsAPI should be used directly instead."""
+ customData = {
+ string extraIncludes = """
+#include "pxr/base/gf/bbox3d.h"
+#include "pxr/usd/usdGeom/primvar.h" """
+ }
+) {
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = """Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc."""
+ )
+
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See \\ref UsdGeom_ImageablePurpose for more detail about how
+ \\em purpose is computed and used."""
+ )
+ rel proxyPrim (
+ doc = """The \\em proxyPrim relationship allows us to link a
+ prim whose \\em purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ \\li In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ \\li DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ \\li With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render"."""
+ )
+}
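+# Editor's sketch (not part of the schema source): how the two Imageable
+# attributes above are typically authored on stage prims; the prim names
+# "Car" and "CollisionGuide" are hypothetical.
+#
+#     def Xform "Car"
+#     {
+#         token visibility = "inherited"
+#
+#         def Mesh "CollisionGuide"
+#         {
+#             uniform token purpose = "guide"
+#             token visibility = "invisible"
+#         }
+#     }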
+
+class "VisibilityAPI"
+(
+    inherits = </APISchemaBase>
+ customData = {
+ token[] apiSchemaCanOnlyApplyTo = [
+ "Imageable"
+ ]
+ }
+ doc = """
+ UsdGeomVisibilityAPI introduces properties that can be used to author
+ visibility opinions.
+
+ \\note
+ Currently, this schema only introduces the attributes that are used to
+ control purpose visibility. Later, this schema will define _all_
+ visibility-related properties and UsdGeomImageable will no longer define
+ those properties.
+
+ The purpose visibility attributes added by this schema,
+ _guideVisibility_, _proxyVisibility_, and _renderVisibility_ can each be
+ used to control visibility for geometry of the corresponding purpose
+ values, with the overall _visibility_ attribute acting as an
+ override. I.e., if _visibility_ evaluates to "invisible", purpose
+ visibility is invisible; otherwise, purpose visibility is determined by
+ the corresponding purpose visibility attribute.
+
+ Note that the behavior of _guideVisibility_ is subtly different from the
+ _proxyVisibility_ and _renderVisibility_ attributes, in that "guide"
+ purpose visibility always evaluates to either "invisible" or "visible",
+ whereas the other attributes may yield computed values of "inherited" if
+ there is no authored opinion on the attribute or inherited from an
+    ancestor. This is motivated by the fact that, in Pixar's user workflows,
+ we have never found a need to have all guides visible in a scene by
+ default, whereas we do find that flexibility useful for "proxy" and
+ "render" geometry.
+
+ This schema can only be applied to UsdGeomImageable prims. The
+    UsdGeomImageable schema provides API for computing the purpose visibility
+ values that result from the attributes introduced by this schema.
+ """
+)
+{
+ uniform token guideVisibility = "invisible" (
+ allowedTokens = ["inherited", "invisible", "visible"]
+ doc = """
+ This attribute controls visibility for geometry with purpose "guide".
+
+ Unlike overall _visibility_, _guideVisibility_ is uniform, and
+ therefore cannot be animated.
+
+ Also unlike overall _visibility_, _guideVisibility_ is tri-state, in
+ that a descendant with an opinion of "visible" overrides an ancestor
+ opinion of "invisible".
+
+ The _guideVisibility_ attribute works in concert with the overall
+ _visibility_ attribute: The visibility of a prim with purpose "guide"
+ is determined by the inherited values it receives for the _visibility_
+ and _guideVisibility_ attributes. If _visibility_ evaluates to
+ "invisible", the prim is invisible. If _visibility_ evaluates to
+ "inherited" and _guideVisibility_ evaluates to "visible", then the
+ prim is visible. __Otherwise, it is invisible.__
+ """
+ )
+
+ uniform token proxyVisibility = "inherited" (
+ allowedTokens = ["inherited", "invisible", "visible"]
+ doc = """
+ This attribute controls visibility for geometry with purpose "proxy".
+
+ Unlike overall _visibility_, _proxyVisibility_ is uniform, and
+ therefore cannot be animated.
+
+ Also unlike overall _visibility_, _proxyVisibility_ is tri-state, in
+ that a descendant with an opinion of "visible" overrides an ancestor
+ opinion of "invisible".
+
+ The _proxyVisibility_ attribute works in concert with the overall
+ _visibility_ attribute: The visibility of a prim with purpose "proxy"
+ is determined by the inherited values it receives for the _visibility_
+ and _proxyVisibility_ attributes. If _visibility_ evaluates to
+ "invisible", the prim is invisible. If _visibility_ evaluates to
+ "inherited" then: If _proxyVisibility_ evaluates to "visible", then
+ the prim is visible; if _proxyVisibility_ evaluates to "invisible",
+ then the prim is invisible; if _proxyVisibility_ evaluates to
+ "inherited", then the prim may either be visible or invisible,
+ depending on a fallback value determined by the calling context.
+ """
+ )
+
+ uniform token renderVisibility = "inherited" (
+ allowedTokens = ["inherited", "invisible", "visible"]
+ doc = """
+ This attribute controls visibility for geometry with purpose
+ "render".
+
+ Unlike overall _visibility_, _renderVisibility_ is uniform, and
+ therefore cannot be animated.
+
+ Also unlike overall _visibility_, _renderVisibility_ is tri-state, in
+ that a descendant with an opinion of "visible" overrides an ancestor
+ opinion of "invisible".
+
+ The _renderVisibility_ attribute works in concert with the overall
+ _visibility_ attribute: The visibility of a prim with purpose "render"
+ is determined by the inherited values it receives for the _visibility_
+ and _renderVisibility_ attributes. If _visibility_ evaluates to
+ "invisible", the prim is invisible. If _visibility_ evaluates to
+ "inherited" then: If _renderVisibility_ evaluates to "visible", then
+ the prim is visible; if _renderVisibility_ evaluates to "invisible",
+ then the prim is invisible; if _renderVisibility_ evaluates to
+ "inherited", then the prim may either be visible or invisible,
+ depending on a fallback value determined by the calling context.
+ """
+ )
+}
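+# Editor's sketch (not part of the schema source): applying VisibilityAPI to
+# an Imageable prim and authoring purpose visibility; the prim name "Rig" is
+# hypothetical.
+#
+#     def Xform "Rig" (
+#         prepend apiSchemas = ["VisibilityAPI"]
+#     )
+#     {
+#         uniform token guideVisibility = "visible"
+#         uniform token renderVisibility = "inherited"
+#     }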
+
+class "PrimvarsAPI" (
+    inherits = </APISchemaBase>
+ doc = """UsdGeomPrimvarsAPI encodes geometric "primitive variables",
+ as UsdGeomPrimvar, which interpolate across a primitive's topology,
+ can override shader inputs, and inherit down namespace.
+
+ \\section usdGeom_PrimvarFetchingAPI Which Method to Use to Retrieve Primvars
+
+ While creating primvars is unambiguous (CreatePrimvar()), there are quite
+ a few methods available for retrieving primvars, making it potentially
+ confusing knowing which one to use. Here are some guidelines:
+
+ \\li If you are populating a GUI with the primvars already available for
+ authoring values on a prim, use GetPrimvars().
+ \\li If you want all of the "useful" (e.g. to a renderer) primvars
+ available at a prim, including those inherited from ancestor prims, use
+ FindPrimvarsWithInheritance(). Note that doing so individually for many
+ prims will be inefficient.
+ \\li To find a particular primvar defined directly on a prim, which may
+ or may not provide a value, use GetPrimvar().
+ \\li To find a particular primvar defined on a prim or inherited from
+ ancestors, which may or may not provide a value, use
+ FindPrimvarWithInheritance().
+ \\li To *efficiently* query for primvars using the overloads of
+ FindPrimvarWithInheritance() and FindPrimvarsWithInheritance(), one
+ must first cache the results of FindIncrementallyInheritablePrimvars() for
+ each non-leaf prim on the stage. """
+ customData = {
+ token apiSchemaType = "nonApplied"
+ string extraIncludes = """
+#include "pxr/usd/usdGeom/primvar.h" """
+ }
+) {
+}
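+# Editor's sketch (not part of the schema source): a primvar as it appears in
+# scene description, with its interpolation recorded as attribute metadata;
+# the prim name "Ground" and the color value are hypothetical.
+#
+#     def Mesh "Ground"
+#     {
+#         color3f[] primvars:displayColor = [(0.3, 0.6, 0.3)] (
+#             interpolation = "constant"
+#         )
+#     }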
+
+class "Xformable" (
+    inherits = </Imageable>
+ customData = {
+ string extraIncludes = """
+#include "pxr/usd/usdGeom/xformOp.h"
+#include <vector> """
+ }
+ doc = """Base class for all transformable prims, which allows arbitrary
+ sequences of component affine transformations to be encoded.
+
+ \\note
+ You may find it useful to review \\ref UsdGeom_LinAlgBasics while reading
+ this class description.
+
+ Supported Component Transformation Operations
+
+ UsdGeomXformable currently supports arbitrary sequences of the following
+ operations, each of which can be encoded in an attribute of the proper
+ shape in any supported precision:
+ \\li translate - 3D
+ \\li scale - 3D
+ \\li rotateX - 1D angle in degrees
+ \\li rotateY - 1D angle in degrees
+ \\li rotateZ - 1D angle in degrees
+    \\li rotateABC - 3D where ABC can be any combination of the six principal
+ Euler Angle sets: XYZ, XZY, YXZ, YZX, ZXY, ZYX. See
+ \\ref usdGeom_rotationPackingOrder "note on rotation packing order"
+ \\li orient - 4D (quaternion)
+ \\li transform - 4x4D
+
+ Creating a Component Transformation
+
+ To add components to a UsdGeomXformable prim, simply call AddXformOp()
+ with the desired op type, as enumerated in \\ref UsdGeomXformOp::Type,
+ and the desired precision, which is one of \\ref UsdGeomXformOp::Precision.
+ Optionally, you can also provide an "op suffix" for the operator that
+ disambiguates it from other components of the same type on the same prim.
+ Application-specific transform schemas can use the suffixes to fill a role
+ similar to that played by AbcGeom::XformOp's "Hint" enums for their own
+ round-tripping logic.
+
+ We also provide specific "Add" API for each type, for clarity and
+ conciseness, e.g. AddTranslateOp(), AddRotateXYZOp() etc.
+
+ AddXformOp() will return a UsdGeomXformOp object, which is a schema on a
+ newly created UsdAttribute that provides convenience API for authoring
+ and computing the component transformations. The UsdGeomXformOp can then
+ be used to author any number of timesamples and default for the op.
+
+ Each successive call to AddXformOp() adds an operator that will be applied
+ "more locally" than the preceding operator, just as if we were pushing
+ transforms onto a transformation stack - which is precisely what should
+ happen when the operators are consumed by a reader.
+
+ \\note
+ If you can, please try to use the UsdGeomXformCommonAPI, which wraps
+ the UsdGeomXformable with an interface in which Op creation is taken
+ care of for you, and there is a much higher chance that the data you
+ author will be importable without flattening into other DCC's, as it
+ conforms to a fixed set of Scale-Rotate-Translate Ops.
+
+ \\sa \\ref usdGeom_xformableExamples "Using the Authoring API"
+
+ Data Encoding and Op Ordering
+
+ Because there is no "fixed schema" of operations, all of the attributes
+ that encode transform operations are dynamic, and are scoped in
+ the namespace "xformOp". The second component of an attribute's name provides
+ the \\em type of operation, as listed above. An "xformOp" attribute can
+ have additional namespace components derived from the \\em opSuffix argument
+ to the AddXformOp() suite of methods, which provides a preferred way of
+ naming the ops such that we can have multiple "translate" ops with unique
+ attribute names. For example, in the attribute named
+ "xformOp:translate:maya:pivot", "translate" is the type of operation and
+ "maya:pivot" is the suffix.
+
+ The following ordered list of attribute declarations in usda
+ define a basic Scale-Rotate-Translate with XYZ Euler angles, wherein the
+ translation is double-precision, and the remainder of the ops are single,
+ in which we will:
+
+
+    \\li Scale by 2.0 in each dimension
+    \\li Rotate about the X, Y, and Z axes by 30, 60, and 90 degrees, respectively
+    \\li Translate by 100 units in the Y direction
+
+ \\code
+ float3 xformOp:rotateXYZ = (30, 60, 90)
+ float3 xformOp:scale = (2, 2, 2)
+ double3 xformOp:translate = (0, 100, 0)
+ uniform token[] xformOpOrder = [ "xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale" ]
+ \\endcode
+
+ The attributes appear in the dictionary order in which USD, by default,
+ sorts them. To ensure the ops are recovered and evaluated in the correct
+ order, the schema introduces the **xformOpOrder** attribute, which
+ contains the names of the op attributes, in the precise sequence in which
+ they should be pushed onto a transform stack. **Note** that the order is
+ opposite to what you might expect, given the matrix algebra described in
+ \\ref UsdGeom_LinAlgBasics. This also dictates order of op creation,
+ since each call to AddXformOp() adds a new op to the end of the
+ \\b xformOpOrder array, as a new "most-local" operation. See
+ \\ref usdGeom_xformableExamples "Example 2 below" for C++ code that could
+ have produced this USD.
+
+ If it were important for the prim's rotations to be independently
+ overridable, we could equivalently (at some performance cost) encode
+ the transformation also like so:
+ \\code
+ float xformOp:rotateX = 30
+ float xformOp:rotateY = 60
+ float xformOp:rotateZ = 90
+ float3 xformOp:scale = (2, 2, 2)
+ double3 xformOp:translate = (0, 100, 0)
+ uniform token[] xformOpOrder = [ "xformOp:translate", "xformOp:rotateZ", "xformOp:rotateY", "xformOp:rotateX", "xformOp:scale" ]
+ \\endcode
+
+ Again, note that although we are encoding an XYZ rotation, the three
+ rotations appear in the **xformOpOrder** in the opposite order, with Z,
+    followed by Y, followed by X.
+
+ Were we to add a Maya-style scalePivot to the above example, it might
+ look like the following:
+ \\code
+ float3 xformOp:rotateXYZ = (30, 60, 90)
+ float3 xformOp:scale = (2, 2, 2)
+ double3 xformOp:translate = (0, 100, 0)
+ double3 xformOp:translate:scalePivot
+ uniform token[] xformOpOrder = [ "xformOp:translate", "xformOp:rotateXYZ", "xformOp:translate:scalePivot", "xformOp:scale" ]
+ \\endcode
+
+ Paired "Inverted" Ops
+
+ We have been claiming that the ordered list of ops serves as a set
+ of instructions to a transform stack, but you may have noticed in the last
+ example that there is a missing operation - the pivot for the scale op
+ needs to be applied in its inverse-form as a final (most local) op! In the
+ AbcGeom::Xform schema, we would have encoded an actual "final" translation
+ op whose value was authored by the exporter as the negation of the pivot's
+ value. However, doing so would be brittle in USD, given that each op can
+ be independently overridden, and the constraint that one attribute must be
+ maintained as the negation of the other in order for successful
+ re-importation of the schema cannot be expressed in USD.
+
+ Our solution leverages the **xformOpOrder** member of the schema, which,
+ in addition to ordering the ops, may also contain one of two special
+ tokens that address the paired op and "stack resetting" behavior.
+
+ The "paired op" behavior is encoded as an "!invert!" prefix in
+ \\b xformOpOrder, as the result of an AddXformOp(isInverseOp=True) call.
+ The \\b xformOpOrder for the last example would look like:
+ \\code
+ uniform token[] xformOpOrder = [ "xformOp:translate", "xformOp:rotateXYZ", "xformOp:translate:scalePivot", "xformOp:scale", "!invert!xformOp:translate:scalePivot" ]
+ \\endcode
+
+ When asked for its value via UsdGeomXformOp::GetOpTransform(), an
+ "inverted" Op (i.e. the "inverted" half of a set of paired Ops) will fetch
+ the value of its paired attribute and return its negation. This works for
+ all op types - an error will be issued if a "transform" type op is singular
+ and cannot be inverted. When getting the authored value of an inverted op
+ via UsdGeomXformOp::Get(), the raw, uninverted value of the associated
+ attribute is returned.
+
+ For the sake of robustness, setting a value on an inverted op is disallowed.
+ Attempting to set a value on an inverted op will result in a coding error
+ and no value being set.
+
+ Resetting the Transform Stack
+
+ The other special op/token that can appear in \\em xformOpOrder is
+ \\em "!resetXformStack!", which, appearing as the first element of
+ \\em xformOpOrder, indicates this prim should not inherit the transformation
+ of its namespace parent. See SetResetXformStack()
+
+ Expected Behavior for "Missing" Ops
+
+ If an importer expects Scale-Rotate-Translate operations, but a prim
+ has only translate and rotate ops authored, the importer should assume
+ an identity scale. This allows us to optimize the data a bit, if only
+ a few components of a very rich schema (like Maya's) are authored in the
+ app.
+
+ \\anchor usdGeom_xformableExamples
+ Using the C++ API
+
+ #1. Creating a simple transform matrix encoding
+ \\snippet examples_usdGeom.cpp CreateMatrixWithDefault
+
+ #2. Creating the simple SRT from the example above
+ \\snippet examples_usdGeom.cpp CreateExampleSRT
+
+ #3. Creating a parameterized SRT with pivot using UsdGeomXformCommonAPI
+ \\snippet examples_usdGeom.cpp CreateSRTWithDefaults
+
+ #4. Creating a rotate-only pivot transform with animated
+ rotation and translation
+ \\snippet examples_usdGeom.cpp CreateAnimatedTransform
+
+"""
+) {
+
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
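+# Editor's sketch (not part of the schema source): the scale-pivot example
+# from the class documentation written out as a complete prim, including the
+# paired "!invert!" entry; the prim name and pivot value are hypothetical.
+#
+#     def Xform "Geo"
+#     {
+#         float3 xformOp:rotateXYZ = (30, 60, 90)
+#         float3 xformOp:scale = (2, 2, 2)
+#         double3 xformOp:translate = (0, 100, 0)
+#         double3 xformOp:translate:scalePivot = (5, 0, 0)
+#         uniform token[] xformOpOrder = [
+#             "xformOp:translate", "xformOp:rotateXYZ",
+#             "xformOp:translate:scalePivot", "xformOp:scale",
+#             "!invert!xformOp:translate:scalePivot" ]
+#     }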
+
+class Scope "Scope" (
+    inherits = </Imageable>
+ doc = """Scope is the simplest grouping primitive, and does not carry the
+ baggage of transformability. Note that transforms should inherit down
+ through a Scope successfully - it is just a guaranteed no-op from a
+ transformability perspective."""
+) {
+}
+
+class Xform "Xform" (
+    inherits = </Xformable>
+ doc = """Concrete prim schema for a transform, which implements Xformable """
+) {
+}
+
+class "Boundable" (
+    inherits = </Xformable>
+ doc = """Boundable introduces the ability for a prim to persistently
+    cache a rectilinear, local-space extent.
+
+ \\section UsdGeom_Boundable_Extent Why Extent and not Bounds ?
+ Boundable introduces the notion of "extent", which is a cached computation
+ of a prim's local-space 3D range for its resolved attributes at the
+ layer and time in which extent is authored. We have found that with
+ composed scene description, attempting to cache pre-computed bounds at
+ interior prims in a scene graph is very fragile, given the ease with which
+ one can author a single attribute in a stronger layer that can invalidate
+ many authored caches - or with which a re-published, referenced asset can
+ do the same.
+
+ Therefore, we limit to precomputing (generally) leaf-prim extent, which
+ avoids the need to read in large point arrays to compute bounds, and
+ provides UsdGeomBBoxCache the means to efficiently compute and
+ (session-only) cache intermediate bounds. You are free to compute and
+ author intermediate bounds into your scenes, of course, which may work
+ well if you have sufficient locks on your pipeline to guarantee that once
+ authored, the geometry and transforms upon which they are based will
+ remain unchanged, or if accuracy of the bounds is not an ironclad
+ requisite.
+
+ When intermediate bounds are authored on Boundable parents, the child prims
+ will be pruned from BBox computation; the authored extent is expected to
+ incorporate all child bounds."""
+)
+{
+ # XXX: Note this is really a GfRange3f, which is not fully supported
+ # in Vt I/O.
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), \\em without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+}
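+# Editor's sketch (not part of the schema source): when geometry-affecting
+# attributes are animated, extent should be authored at the same timeSamples;
+# the prim name and values below are hypothetical.
+#
+#     def Mesh "Blob"
+#     {
+#         point3f[] points.timeSamples = {
+#             1: [(0, 0, 0), (1, 0, 0), (1, 1, 0)],
+#             2: [(0, 0, 0), (2, 0, 0), (2, 2, 0)],
+#         }
+#         float3[] extent.timeSamples = {
+#             1: [(0, 0, 0), (1, 1, 0)],
+#             2: [(0, 0, 0), (2, 2, 0)],
+#         }
+#     }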
+
+class "Gprim" (
+    inherits = </Boundable>
+ doc = """Base class for all geometric primitives.
+
+ Gprim encodes basic graphical properties such as \\em doubleSided and
+ \\em orientation, and provides primvars for "display color" and "display
+ opacity" that travel with geometry to be used as shader overrides. """
+
+) {
+ color3f[] primvars:displayColor (
+ customData = {
+ string apiName = "displayColor"
+ }
+ doc = """It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a \\em displayColor parameter."""
+ )
+
+ float[] primvars:displayOpacity (
+ customData = {
+ string apiName = "displayOpacity"
+ }
+ doc = """Companion to \\em displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+
+ uniform bool doubleSided = false (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ \\em orientation. By doing so they can perform "backface culling" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's \\em doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+ Please see \\ref UsdGeom_WindingOrder for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+}
+
+class Cube "Cube" (
+    inherits = </Gprim>
+ doc = """Defines a primitive rectilinear cube centered at the origin.
+
+ The fallback values for Cube, Sphere, Cone, and Cylinder are set so that
+ they all pack into the same volume/bounds."""
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = true
+ }
+ }
+) {
+ double size = 2.0 (
+ doc = """Indicates the length of each edge of the cube. If you
+ author \\em size you must also author \\em extent.
+
+ \\sa GetExtentAttr()"""
+ )
+
+ float3[] extent = [(-1.0, -1.0, -1.0), (1.0, 1.0, 1.0)] (
+ doc = """Extent is re-defined on Cube only to provide a fallback value.
+ \\sa UsdGeomGprim::GetExtentAttr()."""
+ )
+
+}
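+# Editor's sketch (not part of the schema source): overriding size requires
+# re-authoring extent so the two stay consistent; the prim name is
+# hypothetical.
+#
+#     def Cube "Box"
+#     {
+#         double size = 4.0
+#         float3[] extent = [(-2, -2, -2), (2, 2, 2)]
+#     }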
+
+class Sphere "Sphere" (
+    inherits = </Gprim>
+ doc = """Defines a primitive sphere centered at the origin.
+
+ The fallback values for Cube, Sphere, Cone, and Cylinder are set so that
+ they all pack into the same volume/bounds."""
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = true
+ }
+ }
+) {
+ double radius = 1.0 (
+ doc = """Indicates the sphere's radius. If you
+ author \\em radius you must also author \\em extent.
+
+ \\sa GetExtentAttr()"""
+ )
+
+ float3[] extent = [(-1.0, -1.0, -1.0), (1.0, 1.0, 1.0)] (
+ doc = """Extent is re-defined on Sphere only to provide a fallback
+ value. \\sa UsdGeomGprim::GetExtentAttr()."""
+ )
+}
+
+class Cylinder "Cylinder" (
+    inherits = </Gprim>
+ doc = """Defines a primitive cylinder with closed ends, centered at the
+ origin, whose spine is along the specified \\em axis.
+
+ The fallback values for Cube, Sphere, Cone, and Cylinder are set so that
+ they all pack into the same volume/bounds."""
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = true
+ }
+ }
+) {
+ double height = 2 (
+ doc = """The size of the cylinder's spine along the specified
+ \\em axis. If you author \\em height you must also author \\em extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ double radius = 1.0 (
+ doc = """The radius of the cylinder. If you author \\em radius
+ you must also author \\em extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ uniform token axis = "Z" (
+ allowedTokens = ["X", "Y", "Z"]
+ doc = """The axis along which the spine of the cylinder is aligned"""
+ )
+
+ float3[] extent = [(-1.0, -1.0, -1.0), (1.0, 1.0, 1.0)] (
+ doc = """Extent is re-defined on Cylinder only to provide a fallback
+ value. \\sa UsdGeomGprim::GetExtentAttr()."""
+ )
+}
+
+class Capsule "Capsule" (
+    inherits = </Gprim>
+ doc = """Defines a primitive capsule, i.e. a cylinder capped by two half
+ spheres, centered at the origin, whose spine is along the specified
+ \\em axis.
+ The spherical cap heights (sagitta) of the two endcaps are a function of
+ the relative radii of the endcaps, such that cylinder tangent and sphere
+ tangent are coincident and maintain C1 continuity."""
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = true
+ }
+ }
+) {
+ double height = 1.0 (
+ doc = """The length of the capsule's spine along the specified
+ \\em axis excluding the size of the two half spheres, i.e.
+ the length of the cylinder portion of the capsule.
+ If you author \\em height you must also author \\em extent.
+ \\sa GetExtentAttr()"""
+ )
+ double radius = 0.5 (
+ doc = """The radius of the capsule. If you
+ author \\em radius you must also author \\em extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ uniform token axis = "Z" (
+ allowedTokens = ["X", "Y", "Z"]
+ doc = """The axis along which the spine of the capsule is aligned"""
+ )
+
+ float3[] extent = [(-0.5, -0.5, -1.0), (0.5, 0.5, 1.0)] (
+ doc = """Extent is re-defined on Capsule only to provide a fallback
+ value. \\sa UsdGeomGprim::GetExtentAttr()."""
+ )
+}
+
+class Cone "Cone" (
+    inherits = </Gprim>
+ doc = """Defines a primitive cone, centered at the origin, whose spine
+ is along the specified \\em axis, with the apex of the cone pointing
+ in the direction of the positive axis.
+
+ The fallback values for Cube, Sphere, Cone, and Cylinder are set so that
+ they all pack into the same volume/bounds."""
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = true
+ }
+ }
+) {
+ double height = 2.0 (
+ doc = """The length of the cone's spine along the specified
+ \\em axis. If you author \\em height you must also author \\em extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ double radius = 1.0 (
+ doc = """The radius of the cone. If you
+ author \\em radius you must also author \\em extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ uniform token axis = "Z" (
+ allowedTokens = ["X", "Y", "Z"]
+ doc = """The axis along which the spine of the cone is aligned"""
+ )
+
+ float3[] extent = [(-1.0, -1.0, -1.0), (1.0, 1.0, 1.0)] (
+ doc = """Extent is re-defined on Cone only to provide a fallback
+ value. \\sa UsdGeomGprim::GetExtentAttr()."""
+ )
+}
+
+class Cylinder_1 "Cylinder_1" (
+    inherits = </Gprim>
+ doc = """Defines a primitive cylinder with closed ends, centered at the
+ origin, whose spine is along the specified \\em axis, with a pair of radii
+ describing the size of the end points.
+
+ The fallback values for Cube, Sphere, Cone, and Cylinder are set so that
+ they all pack into the same volume/bounds."""
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = true
+ }
+ }
+) {
+ double height = 2 (
+ doc = """The length of the cylinder's spine along the specified
+ \\em axis. If you author \\em height you must also author \\em extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ double radiusTop = 1.0 (
+ doc = """The radius of the top of the cylinder - i.e. the face located
+ along the positive \\em axis. If you author \\em radiusTop you must also
+ author \\em extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ double radiusBottom = 1.0 (
+ doc = """The radius of the bottom of the cylinder - i.e. the face
+        located along the negative \\em axis. If you author
+ \\em radiusBottom you must also author \\em extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ uniform token axis = "Z" (
+ allowedTokens = ["X", "Y", "Z"]
+ doc = """The axis along which the spine of the cylinder is aligned"""
+ )
+
+ float3[] extent = [(-1.0, -1.0, -1.0), (1.0, 1.0, 1.0)] (
+ doc = """Extent is re-defined on Cylinder only to provide a fallback
+ value. \\sa UsdGeomGprim::GetExtentAttr()."""
+ )
+}
+
+class Capsule_1 "Capsule_1" (
+    inherits = </Gprim>
+ doc = """Defines a primitive capsule, i.e. a cylinder capped by two half
+ spheres, with potentially different radii, centered at the origin, and whose
+ spine is along the specified \\em axis.
+ The spherical cap heights (sagitta) of the two endcaps are a function of
+ the relative radii of the endcaps, such that cylinder tangent and sphere
+ tangent are coincident and maintain C1 continuity."""
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = true
+ }
+ }
+) {
+ double height = 1.0 (
+ doc = """The length of the capsule's spine along the specified
+ \\em axis excluding the size of the two half spheres, i.e.
+ the length of the cylinder portion of the capsule.
+ If you author \\em height you must also author \\em extent.
+ \\sa GetExtentAttr()"""
+ )
+ double radiusTop = 0.5 (
+ doc = """The radius of the capping sphere at the top of the capsule -
+ i.e. the sphere in the direction of the positive \\em axis. If you
+        author \\em radiusTop you must also author \\em extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ double radiusBottom = 0.5 (
+ doc = """The radius of the capping sphere at the bottom of the capsule -
+ i.e. the sphere located in the direction of the negative \\em axis. If
+        you author \\em radiusBottom you must also author \\em extent.
+
+ \\sa GetExtentAttr()"""
+ )
+ uniform token axis = "Z" (
+ allowedTokens = ["X", "Y", "Z"]
+ doc = """The axis along which the spine of the capsule is aligned"""
+ )
+
+ float3[] extent = [(-0.5, -0.5, -1.0), (0.5, 0.5, 1.0)] (
+ doc = """Extent is re-defined on Capsule only to provide a fallback
+ value. \\sa UsdGeomGprim::GetExtentAttr()."""
+ )
+}
+
+class Plane "Plane" (
+    inherits = </Gprim>
+    doc = """Defines a primitive plane, centered at the origin, defined by
+ a cardinal axis, width, and length. The plane is double-sided by default.
+
+ The axis of width and length are perpendicular to the plane's \\em axis:
+
+ axis | width | length
+ ----- | ------ | -------
+ X | z-axis | y-axis
+ Y | x-axis | z-axis
+ Z | x-axis | y-axis
+
+ """
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = true
+ }
+ }
+) {
+ uniform bool doubleSided = true (
+ doc = """Planes are double-sided by default. Clients may also support
+ single-sided planes.
+
+ \\sa UsdGeomGprim::GetDoubleSidedAttr()"""
+ )
+ double width = 2.0 (
+ doc = """The width of the plane, which aligns to the x-axis when \\em axis is
+ 'Z' or 'Y', or to the z-axis when \\em axis is 'X'. If you author \\em width
+ you must also author \\em extent.
+
+ \\sa UsdGeomGprim::GetExtentAttr()"""
+ )
+ double length = 2.0 (
+ doc = """The length of the plane, which aligns to the y-axis when \\em axis is
+ 'Z' or 'X', or to the z-axis when \\em axis is 'Y'. If you author \\em length
+ you must also author \\em extent.
+
+ \\sa UsdGeomGprim::GetExtentAttr()"""
+ )
+ uniform token axis = "Z" (
+ allowedTokens = ["X", "Y", "Z"]
+ doc = """The axis along which the surface of the plane is aligned. When set
+ to 'Z' the plane is in the xy-plane; when \\em axis is 'X' the plane is in
+ the yz-plane, and when \\em axis is 'Y' the plane is in the xz-plane.
+
+ \\sa UsdGeomGprim::GetAxisAttr()."""
+ )
+ float3[] extent = [(-1.0, -1.0, 0.0), (1.0, 1.0, 0.0)] (
+ doc = """Extent is re-defined on Plane only to provide a fallback
+ value. \\sa UsdGeomGprim::GetExtentAttr()."""
+ )
+}
+
+class "PointBased" (
+ doc = """Base class for all UsdGeomGprims that possess points,
+ providing common attributes such as normals and velocities."""
+
+    inherits = </Gprim>
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = true
+ }
+ }
+) {
+ # positional
+ point3f[] points (
+ doc = """The primary geometry attribute for all PointBased
+ primitives, describes points in (local) space."""
+ )
+
+ vector3f[] velocities (
+ doc = """If provided, 'velocities' should be used by renderers to
+        compute positions between samples for the 'points' attribute, rather
+ than interpolating between neighboring 'points' samples. This is the
+ only reasonable means of computing motion blur for topologically
+ varying PointBased primitives. It follows that the length of each
+ 'velocities' sample must match the length of the corresponding
+ 'points' sample. Velocity is measured in position units per second,
+ as per most simulation software. To convert to position units per
+ UsdTimeCode, divide by UsdStage::GetTimeCodesPerSecond().
+
+ See also \\ref UsdGeom_VelocityInterpolation ."""
+ )
+
+ vector3f[] accelerations (
+ doc = """If provided, 'accelerations' should be used with
+ velocities to compute positions between samples for the 'points'
+ attribute rather than interpolating between neighboring 'points'
+ samples. Acceleration is measured in position units per second-squared.
+ To convert to position units per squared UsdTimeCode, divide by the
+ square of UsdStage::GetTimeCodesPerSecond()."""
+ )
+
+ # shaping
+ normal3f[] normals (
+ doc = """Provide an object-space orientation for individual points,
+ which, depending on subclass, may define a surface, curve, or free
+ points. Note that 'normals' should not be authored on any Mesh that
+ is subdivided, since the subdivision algorithm will define its own
+ normals. 'normals' is not a generic primvar, but the number of elements
+ in this attribute will be determined by its 'interpolation'. See
+ \\ref SetNormalsInterpolation() . If 'normals' and 'primvars:normals'
+ are both specified, the latter has precedence."""
+ )
+}
+
+class Mesh "Mesh" (
+    inherits = </PointBased>
+ customData = {
+ string extraIncludes = """
+#include "pxr/usd/usd/timeCode.h" """
+ }
+ doc="""Encodes a mesh with optional subdivision properties and features.
+
+ As a point-based primitive, meshes are defined in terms of points that
+ are connected into edges and faces. Many references to meshes use the
+ term 'vertex' in place of or interchangeably with 'points', while some
+ use 'vertex' to refer to the 'face-vertices' that define a face. To
+ avoid confusion, the term 'vertex' is intentionally avoided in favor of
+ 'points' or 'face-vertices'.
+
+ The connectivity between points, edges and faces is encoded using a
+ common minimal topological description of the faces of the mesh. Each
+ face is defined by a set of face-vertices using indices into the Mesh's
+ _points_ array (inherited from UsdGeomPointBased) and laid out in a
+ single linear _faceVertexIndices_ array for efficiency. A companion
+ _faceVertexCounts_ array provides, for each face, the number of
+ consecutive face-vertices in _faceVertexIndices_ that define the face.
+ No additional connectivity information is required or constructed, so
+ no adjacency or neighborhood queries are available.
+
+ A key property of this mesh schema is that it encodes both subdivision
+ surfaces and simpler polygonal meshes. This is achieved by varying the
+ _subdivisionScheme_ attribute, which is set to specify Catmull-Clark
+ subdivision by default, so polygonal meshes must always be explicitly
+ declared. The available subdivision schemes and additional subdivision
+ features encoded in optional attributes conform to the feature set of
+ OpenSubdiv
+ (https://graphics.pixar.com/opensubdiv/docs/subdivision_surfaces.html).
+
+ \\anchor UsdGeom_Mesh_Primvars
+ __A Note About Primvars__
+
+ The following list clarifies the number of elements for and the
+ interpolation behavior of the different primvar interpolation types
+ for meshes:
+
+ - __constant__: One element for the entire mesh; no interpolation.
+ - __uniform__: One element for each face of the mesh; elements are
+ typically not interpolated but are inherited by other faces derived
+ from a given face (via subdivision, tessellation, etc.).
+ - __varying__: One element for each point of the mesh;
+ interpolation of point data is always linear.
+ - __vertex__: One element for each point of the mesh;
+ interpolation of point data is applied according to the
+ _subdivisionScheme_ attribute.
+ - __faceVarying__: One element for each of the face-vertices that
+ define the mesh topology; interpolation of face-vertex data may
+ be smooth or linear, according to the _subdivisionScheme_ and
+ _faceVaryingLinearInterpolation_ attributes.
+
+ Primvar interpolation types and related utilities are described more
+ generally in \\ref Usd_InterpolationVals.
+
+ \\anchor UsdGeom_Mesh_Normals
+ __A Note About Normals__
+
+ Normals should not be authored on a subdivision mesh, since subdivision
+ algorithms define their own normals. They should only be authored for
+ polygonal meshes (_subdivisionScheme_ = "none").
+
+ The _normals_ attribute inherited from UsdGeomPointBased is not a generic
+ primvar, but the number of elements in this attribute will be determined by
+ its _interpolation_. See \\ref UsdGeomPointBased::GetNormalsInterpolation() .
+ If _normals_ and _primvars:normals_ are both specified, the latter has
+ precedence. If a polygonal mesh specifies __neither__ _normals_ nor
+ _primvars:normals_, then it should be treated and rendered as faceted,
+ with no attempt to compute smooth normals.
+
+ The normals generated for smooth subdivision schemes, e.g. Catmull-Clark
+ and Loop, will likewise be smooth, but others, e.g. Bilinear, may be
+ discontinuous between faces and/or within non-planar irregular faces."""
+) {
+ #
+ # Common Properties
+ #
+ int[] faceVertexIndices (
+ doc = """Flat list of the index (into the _points_ attribute) of each
+ vertex of each face in the mesh. If this attribute has more than
+ one timeSample, the mesh is considered to be topologically varying."""
+ )
+
+ int[] faceVertexCounts (
+ doc = """Provides the number of vertices in each face of the mesh,
+ which is also the number of consecutive indices in _faceVertexIndices_
+ that define the face. The length of this attribute is the number of
+ faces in the mesh. If this attribute has more than
+ one timeSample, the mesh is considered to be topologically varying."""
+ )
+
+ #
+ # Subdiv Properties
+ #
+
+ uniform token subdivisionScheme = "catmullClark" (
+ allowedTokens = ["catmullClark", "loop", "bilinear", "none"]
+ doc = """The subdivision scheme to be applied to the surface.
+ Valid values are:
+
+ - __catmullClark__: The default, Catmull-Clark subdivision; preferred
+ for quad-dominant meshes (generalizes B-splines); interpolation
+ of point data is smooth (non-linear)
+ - __loop__: Loop subdivision; preferred for purely triangular meshes;
+ interpolation of point data is smooth (non-linear)
+ - __bilinear__: Subdivision reduces all faces to quads (topologically
+ similar to "catmullClark"); interpolation of point data is bilinear
+ - __none__: No subdivision, i.e. a simple polygonal mesh; interpolation
+ of point data is linear
+
+ Polygonal meshes are typically lighter weight and faster to render,
+ depending on renderer and render mode. Use of "bilinear" will produce
+ a similar shape to a polygonal mesh and may offer additional guarantees
+ of watertightness and additional subdivision features (e.g. holes) but
+ may also not respect authored normals.""")
+
+ token interpolateBoundary = "edgeAndCorner" (
+ allowedTokens = ["none", "edgeOnly", "edgeAndCorner"]
+ doc = """Specifies how subdivision is applied for faces adjacent to
+ boundary edges and boundary points. Valid values correspond to choices
+ available in OpenSubdiv:
+
+ - __none__: No boundary interpolation is applied and boundary faces are
+ effectively treated as holes
+ - __edgeOnly__: A sequence of boundary edges defines a smooth curve to
+ which the edges of subdivided boundary faces converge
+ - __edgeAndCorner__: The default, similar to "edgeOnly" but the smooth
+ boundary curve is made sharp at corner points
+
+ These are illustrated and described in more detail in the OpenSubdiv
+ documentation:
+ https://graphics.pixar.com/opensubdiv/docs/subdivision_surfaces.html#boundary-interpolation-rules""")
+
+ token faceVaryingLinearInterpolation = "cornersPlus1" (
+ allowedTokens = ["none", "cornersOnly", "cornersPlus1",
+ "cornersPlus2", "boundaries", "all"]
+ doc = """Specifies how elements of a primvar of interpolation type
+ "faceVarying" are interpolated for subdivision surfaces. Interpolation
+ can be as smooth as a "vertex" primvar or constrained to be linear at
+ features specified by several options. Valid values correspond to
+ choices available in OpenSubdiv:
+
+ - __none__: No linear constraints or sharpening, smooth everywhere
+ - __cornersOnly__: Sharpen corners of discontinuous boundaries only,
+ smooth everywhere else
+ - __cornersPlus1__: The default, same as "cornersOnly" plus additional
+ sharpening at points where three or more distinct face-varying
+ values occur
+ - __cornersPlus2__: Same as "cornersPlus1" plus additional sharpening
+ at points with at least one discontinuous boundary corner or
+ only one discontinuous boundary edge (a dart)
+ - __boundaries__: Piecewise linear along discontinuous boundaries,
+ smooth interior
+ - __all__: Piecewise linear everywhere
+
+ These are illustrated and described in more detail in the OpenSubdiv
+ documentation:
+ https://graphics.pixar.com/opensubdiv/docs/subdivision_surfaces.html#face-varying-interpolation-rules""")
+
+ token triangleSubdivisionRule = "catmullClark" (
+ allowedTokens = ["catmullClark", "smooth"]
+ doc = """Specifies an option to the subdivision rules for the
+ Catmull-Clark scheme to try and improve undesirable artifacts when
+ subdividing triangles. Valid values are "catmullClark" for the
+ standard rules (the default) and "smooth" for the improvement.
+
+ See https://graphics.pixar.com/opensubdiv/docs/subdivision_surfaces.html#triangle-subdivision-rule""")
+
+ int[] holeIndices = [] (
+ doc = """The indices of all faces that should be treated as holes,
+ i.e. made invisible. This is traditionally a feature of subdivision
+ surfaces and not generally applied to polygonal meshes.""")
+
+ int[] cornerIndices = [] (
+ doc = """The indices of points for which a corresponding sharpness
+ value is specified in _cornerSharpnesses_ (so the size of this array
+ must match that of _cornerSharpnesses_).""")
+
+ float[] cornerSharpnesses = [] (
+ doc = """The sharpness values associated with a corresponding set of
+ points specified in _cornerIndices_ (so the size of this array must
+ match that of _cornerIndices_). Use the constant `SHARPNESS_INFINITE`
+ for a perfectly sharp corner.""")
+
+ int[] creaseIndices = [] (
+ doc = """The indices of points grouped into sets of successive pairs
+ that identify edges to be creased. The size of this array must be
+ equal to the sum of all elements of the _creaseLengths_ attribute.""")
+
+ int[] creaseLengths = [] (
+ doc = """The length of this array specifies the number of creases
+ (sets of adjacent sharpened edges) on the mesh. Each element gives
+ the number of points of each crease, whose indices are successively
+ laid out in the _creaseIndices_ attribute. Since each crease must
+ be at least one edge long, each element of this array must be at
+ least two.""")
+
+ float[] creaseSharpnesses = [] (
+ doc = """The per-crease or per-edge sharpness values for all creases.
+ Since _creaseLengths_ encodes the number of points in each crease,
+ the number of elements in this array will be either len(creaseLengths)
+ or the sum over all X of (creaseLengths[X] - 1). Note that while
+ the RI spec allows each crease to have either a single sharpness
+ or a value per-edge, USD will encode either a single sharpness
+ per crease on a mesh, or sharpnesses for all edges making up
+ the creases on a mesh. Use the constant `SHARPNESS_INFINITE` for a
+ perfectly sharp crease.""")
+}
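+# Editor's sketch (not part of the schema source): a minimal polygonal quad;
+# subdivisionScheme must be authored as "none" because the fallback is
+# Catmull-Clark subdivision. The prim name and point values are hypothetical.
+#
+#     def Mesh "Quad"
+#     {
+#         int[] faceVertexCounts = [4]
+#         int[] faceVertexIndices = [0, 1, 2, 3]
+#         point3f[] points = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]
+#         float3[] extent = [(0, 0, 0), (1, 1, 0)]
+#         uniform token subdivisionScheme = "none"
+#     }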
+
+class TetMesh "TetMesh" (
+    inherits = </PointBased>
+ customData = {
+ string extraIncludes = """
+#include "pxr/usd/usd/timeCode.h" """
+ }
+ doc="""Encodes a tetrahedral mesh. A tetrahedral mesh is defined as a set of
+ tetrahedra. Each tetrahedron is defined by a set of 4 points, with the
+ triangles of the tetrahedron determined from these 4 points as described in
+ the tetVertexIndices attribute description. The mesh surface faces
+ are encoded as triangles. Surface faces must be provided for consumers
+ that need to do surface calculations, such as renderers or consumers using
+ physics attachments. Both tetrahedra and surface face definitions use
+ indices into the TetMesh's points attribute, inherited from
+ UsdGeomPointBased."""
+) {
+ #
+ # Common Properties
+ #
+ int4[] tetVertexIndices (
+ doc = """Flat list of the index (into the points attribute) of
+ each vertex of each tetrahedron in the mesh. Each int4 corresponds to the
+ indices of a single tetrahedron. Users should set the orientation
+        attribute inherited from UsdGeomGprim accordingly. That is, if the orientation
+ is "rightHanded", the CCW face ordering of a tetrahedron is
+ [123],[032],[013],[021] with respect to the int4. This results in the
+ normals facing outward from the center of the tetrahedron. The following
+ diagram shows the face ordering of an unwrapped tetrahedron with
+ "rightHanded" orientation.
+
+ \\image html USDTetMeshRightHanded.svg
+
+ If the orientation attribute is set to "leftHanded" the face
+ ordering of the tetrahedron is [321],[230],[310],[120] and the
+ leftHanded CW face normals point outward from the center of the
+ tetrahedron. The following diagram shows the face ordering of an
+ unwrapped tetrahedron with "leftHanded" orientation.
+
+ \\image html USDTetMeshLeftHanded.svg
+
+ Setting the orientation attribute to align with the
+ ordering of the int4 for the tetrahedrons is the responsibility of the
+ user."""
+ )
+
+ int3[] surfaceFaceVertexIndices (
+        doc = """surfaceFaceVertexIndices defines the indices of the
+        triangular surface faces with respect to the points of the TetMesh. Again,
+        the orientation attribute inherited from UsdGeomGprim should be
+ set accordingly. The orientation for faces of tetrahedra and
+ surface faces must match."""
+ )
+}
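+# Editor's sketch (not part of the schema source): a single right-handed
+# tetrahedron in which every face is also a surface face; the surface
+# triangles follow the [123],[032],[013],[021] ordering described above so
+# that their normals point outward. The prim name and point values are
+# hypothetical.
+#
+#     def TetMesh "Tet"
+#     {
+#         point3f[] points = [(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)]
+#         int4[] tetVertexIndices = [(0, 1, 2, 3)]
+#         int3[] surfaceFaceVertexIndices = [(1, 2, 3), (0, 3, 2), (0, 1, 3), (0, 2, 1)]
+#     }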
+
+class GeomSubset "GeomSubset" (
+    inherits = </Typed>
+ doc = """Encodes a subset of a piece of geometry (i.e. a UsdGeomImageable)
+ as a set of indices. Currently supports encoding subsets of faces,
+ points, edges, and tetrahedrons.
+
+ To apply to a geometric prim, a GeomSubset prim must be the prim's direct
+ child in namespace, and possess a concrete defining specifier (i.e. def).
+ This restriction makes it easy and efficient to discover subsets of a prim.
+ We might want to relax this restriction if it's common to have multiple
+ families of subsets on a gprim and if it's useful to be able to
+ organize subsets belonging to a family under a common scope. See
+ 'familyName' attribute for more info on defining a family of subsets.
+
+ Note that a GeomSubset isn't an imageable (i.e. doesn't derive from
+ UsdGeomImageable). So, you can't author visibility for it or
+ override its purpose.
+
+ Materials are bound to GeomSubsets just as they are for regular
+ geometry using API available in UsdShade (UsdShadeMaterial::Bind).
+"""
+ customData = {
+ string className = "Subset"
+ string extraIncludes = """
+#include "pxr/base/tf/token.h"
+#include "pxr/usd/usdGeom/imageable.h"
+#include "pxr/usd/usdGeom/mesh.h"
+#include "pxr/usd/usdGeom/tetMesh.h"
+"""
+ }
+)
+{
+ uniform token elementType = "face" (
+ allowedTokens = ["face", "point", "edge", "tetrahedron"]
+ doc = """The type of element that the indices target. "elementType" can
+ have one of the following values:
+
+        \\li __face__: Identifies faces on a Gprim's surface. For a
+        UsdGeomMesh, each element of the _indices_ attribute would refer to
+        an element of the Mesh's _faceVertexCounts_ attribute. For a
+        UsdGeomTetMesh, each element of the _indices_ attribute would refer
+        to an element of the Mesh's _surfaceFaceVertexIndices_ attribute.
+
+        \\li __point__: For any UsdGeomPointBased, each element of the
+        _indices_ attribute would refer to an element of the prim's
+        _points_ attribute.
+
+        \\li __edge__: For any UsdGeomMesh, each pair of elements
+        in the _indices_ attribute would refer to a pair of points of the
+        Mesh's _points_ attribute that are connected as an implicit edge on
+        the Mesh. These edges are derived from the Mesh's _faceVertexIndices_
+        attribute. Edges are not currently defined for a UsdGeomTetMesh, but
+        could be derived from all tetrahedron edges or surface face edges only
+        if a specific use-case arises.
+
+        \\li __tetrahedron__: For any UsdGeomTetMesh, each element of the
+        _indices_ attribute would refer to an element of the TetMesh's
+        _tetVertexIndices_ attribute.
+        """
+ )
+ int[] indices = [] (
+ doc = """The set of indices included in this subset. The indices need not
+ be sorted, but the same index should not appear more than once. Indices
+ are invalid if outside the range [0, elementCount) for the given time on
+ the parent geometric prim."""
+ )
+ uniform token familyName = "" (
+ doc = """The name of the family of subsets that this subset belongs to.
+ This is optional and is primarily useful when there are multiple
+ families of subsets under a geometric prim. In some cases, this could
+ also be used for achieving proper roundtripping of subset data between
+ DCC apps.
+ When multiple subsets belonging to a prim have the same familyName, they
+ are said to belong to the family. A familyType value can be
+ encoded on the owner of a family of subsets as a token using the static
+ method UsdGeomSubset::SetFamilyType(). "familyType" can have one of the
+ following values:
+
+        \\li UsdGeomTokens->partition: implies that every element of
+        the whole geometry appears exactly once in only one of the subsets
+        belonging to the family.
+
+        \\li UsdGeomTokens->nonOverlapping: an element that appears in one
+        subset may not appear in any other subset belonging to the family, and
+        appears only once in the subset in which it appears.
+
+        \\li UsdGeomTokens->unrestricted: implies that there are no
+        restrictions w.r.t. the membership of elements in the subsets. They
+        could be overlapping and the union of all subsets in the family may
+        not represent the whole.
+
+ \\note The validity of subset data is not enforced by the authoring
+ APIs, however they can be checked using UsdGeomSubset::ValidateFamily().
+ """
+ )
+}
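+# Editor's sketch (not part of the schema source): two face subsets that
+# partition a two-face mesh into a single family; the prim names and the
+# "materialBind" family name are hypothetical, and the family's type would be
+# recorded via UsdGeomSubset::SetFamilyType() as described above.
+#
+#     def Mesh "TwoFaces"
+#     {
+#         int[] faceVertexCounts = [3, 3]
+#         int[] faceVertexIndices = [0, 1, 2, 0, 2, 3]
+#         point3f[] points = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]
+#
+#         def GeomSubset "Front"
+#         {
+#             uniform token elementType = "face"
+#             uniform token familyName = "materialBind"
+#             int[] indices = [0]
+#         }
+#
+#         def GeomSubset "Back"
+#         {
+#             uniform token elementType = "face"
+#             uniform token familyName = "materialBind"
+#             int[] indices = [1]
+#         }
+#     }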
+
+class NurbsPatch "NurbsPatch" (
+    inherits = </PointBased>
+ doc = """Encodes a rational or polynomial non-uniform B-spline
+ surface, with optional trim curves.
+
+ The encoding mostly follows that of RiNuPatch and RiTrimCurve:
+ https://renderman.pixar.com/resources/RenderMan_20/geometricPrimitives.html#rinupatch , with some minor renaming and coalescing for clarity.
+
+ The layout of control vertices in the \\em points attribute inherited
+ from UsdGeomPointBased is row-major with U considered rows, and V columns.
+
+ \\anchor UsdGeom_NurbsPatch_Form
+ NurbsPatch Form
+
+ The authored points, orders, knots, weights, and ranges are all that is
+ required to render the nurbs patch. However, the only way to model closed
+ surfaces with nurbs is to ensure that the first and last control points
+ along the given axis are coincident. Similarly, to ensure the surface is
+ not only closed but also C2 continuous, the last \\em order - 1 control
+ points must be (correspondingly) coincident with the first \\em order - 1
+ control points, and also the spacing of the last corresponding knots
+ must be the same as the first corresponding knots.
+
+ Form is provided as an aid to interchange between modeling and
+ animation applications so that they can robustly identify the intent with
+ which the surface was modelled, and take measures (if they are able) to
+    preserve the continuity/coincidence constraints as the surface may be rigged
+ or deformed.
+ \\li An \\em open-form NurbsPatch has no continuity constraints.
+ \\li A \\em closed-form NurbsPatch expects the first and last control points
+ to overlap
+ \\li A \\em periodic-form NurbsPatch expects the first and last
+ \\em order - 1 control points to overlap.
+
+ Nurbs vs Subdivision Surfaces
+
+ Nurbs are an important modeling primitive in CAD/CAM tools and early
+ computer graphics DCC's. Because they have a natural UV parameterization
+ they easily support "trim curves", which allow smooth shapes to be
+ carved out of the surface.
+
+ However, the topology of the patch is always rectangular, and joining two
+ nurbs patches together (especially when they have differing numbers of
+ spans) is difficult to do smoothly. Also, nurbs are not supported by
+ the Ptex texturing technology (http://ptex.us).
+
+ Neither of these limitations are shared by subdivision surfaces; therefore,
+ although they do not subscribe to trim-curve-based shaping, subdivs are
+ often considered a more flexible modeling primitive.
+ """
+) {
+ int uVertexCount (
+ doc = """Number of vertices in the U direction. Should be at least as
+ large as uOrder."""
+ )
+
+ int vVertexCount (
+ doc = """Number of vertices in the V direction. Should be at least as
+ large as vOrder."""
+ )
+
+ int uOrder (
+ doc = """Order in the U direction. Order must be positive and is
+ equal to the degree of the polynomial basis to be evaluated, plus 1."""
+ )
+
+ int vOrder (
+ doc = """Order in the V direction. Order must be positive and is
+ equal to the degree of the polynomial basis to be evaluated, plus 1."""
+ )
+
+ double[] uKnots (
+ doc = """Knot vector for U direction providing U parameterization.
+ The length of this array must be ( uVertexCount + uOrder ), and its
+ entries must take on monotonically increasing values."""
+ )
+
+ double[] vKnots (
+        doc = """Knot vector for V direction providing V parameterization.
+ The length of this array must be ( vVertexCount + vOrder ), and its
+ entries must take on monotonically increasing values."""
+ )
+
+ uniform token uForm = "open" (
+ allowedTokens = ["open", "closed", "periodic"]
+ doc = """Interpret the control grid and knot vectors as representing
+ an open, geometrically closed, or geometrically closed and C2 continuous
+ surface along the U dimension.
+ \\sa \\ref UsdGeom_NurbsPatch_Form "NurbsPatch Form" """
+ )
+
+ uniform token vForm = "open" (
+ allowedTokens = ["open", "closed", "periodic"]
+ doc = """Interpret the control grid and knot vectors as representing
+ an open, geometrically closed, or geometrically closed and C2 continuous
+ surface along the V dimension.
+ \\sa \\ref UsdGeom_NurbsPatch_Form "NurbsPatch Form" """
+ )
+
+ # Alembic's NuPatch does not encode these... wonder how they
+ # get away with that? Just assume it's the full range, presumably.
+ double2 uRange (
+ doc = """Provides the minimum and maximum parametric values (as defined
+ by uKnots) over which the surface is actually defined. The minimum
+ must be less than the maximum, and greater than or equal to the
+        value of uKnots[uOrder-1]. The maximum must be less than or equal
+ to the last element's value in uKnots."""
+ )
+
+ double2 vRange (
+ doc = """Provides the minimum and maximum parametric values (as defined
+ by vKnots) over which the surface is actually defined. The minimum
+ must be less than the maximum, and greater than or equal to the
+        value of vKnots[vOrder-1]. The maximum must be less than or equal
+ to the last element's value in vKnots."""
+ )
+
+ double[] pointWeights (
+ doc = """Optionally provides "w" components for each control point,
+ thus must be the same length as the points attribute. If authored,
+ the patch will be rational. If unauthored, the patch will be
+ polynomial, i.e. weight for all points is 1.0.
+ \\note Some DCC's pre-weight the \\em points, but in this schema,
+ \\em points are not pre-weighted."""
+ )
+
+ int[] trimCurve:counts (
+ doc = """Each element specifies how many curves are present in each
+ "loop" of the trimCurve, and the length of the array determines how
+ many loops the trimCurve contains. The sum of all elements is the
+        total number of curves in the trim, to which we will refer as
+ \\em nCurves in describing the other trim attributes."""
+ )
+
+ int[] trimCurve:orders (
+ doc = """Flat list of orders for each of the \\em nCurves curves."""
+ )
+
+ int[] trimCurve:vertexCounts (
+ doc = """Flat list of number of vertices for each of the
+ \\em nCurves curves."""
+ )
+
+ double[] trimCurve:knots (
+ doc = """Flat list of parametric values for each of the
+ \\em nCurves curves. There will be as many knots as the sum over
+ all elements of \\em vertexCounts plus the sum over all elements of
+ \\em orders."""
+ )
+
+ double2[] trimCurve:ranges (
+ doc = """Flat list of minimum and maximum parametric values
+ (as defined by \\em knots) for each of the \\em nCurves curves."""
+ )
+
+ double3[] trimCurve:points (
+ doc = """Flat list of homogeneous 2D points (u, v, w) that comprise
+ the \\em nCurves curves. The number of points should be equal to the
+        sum over all elements of \\em vertexCounts."""
+ )
+
+}
+
+class "Curves" (
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = true
+ }
+ }
+    inherits = </PointBased>
+ doc = """Base class for UsdGeomBasisCurves, UsdGeomNurbsCurves, and
+ UsdGeomHermiteCurves. The BasisCurves schema is designed to be
+    analogous to offline renderers' notion of batched curves (such as
+ the classical RIB definition via Basis and Curves statements),
+    while the NurbsCurve schema is designed to be analogous to the
+ NURBS curves found in packages like Maya and Houdini while
+ retaining their consistency with the RenderMan specification for
+ NURBS Patches. HermiteCurves are useful for the
+ interchange of animation guides and paths.
+
+    It is safe to use the length of the curveVertexCounts array to derive
+ the number of curves and the number and layout of curve vertices,
+ but this schema should NOT be used to derive the number of curve
+ points. While vertex indices are implicit in all shipped
+ descendent types of this schema, one should not assume that all
+ internal or future shipped schemas will follow this pattern. Be
+ sure to key any indexing behavior off the concrete type, not this
+ abstract type.
+ """
+) {
+ # topology attributes
+ int[] curveVertexCounts (
+ doc = """Curves-derived primitives can represent multiple distinct,
+ potentially disconnected curves. The length of 'curveVertexCounts'
+ gives the number of such curves, and each element describes the
+ number of vertices in the corresponding curve"""
+ )
+
+ # shaping attributes
+ float[] widths (
+ doc = """Provides width specification for the curves, whose application
+ will depend on whether the curve is oriented (normals are defined for
+ it), in which case widths are "ribbon width", or unoriented, in which
+ case widths are cylinder width. 'widths' is not a generic Primvar,
+ but the number of elements in this attribute will be determined by
+ its 'interpolation'. See \\ref SetWidthsInterpolation() . If 'widths'
+ and 'primvars:widths' are both specified, the latter has precedence."""
+ )
+}
+
+class BasisCurves "BasisCurves" (
+    inherits = </Curves>
+ doc = """BasisCurves are a batched curve representation analogous to the
+ classic RIB definition via Basis and Curves statements. BasisCurves are
+ often used to render dense aggregate geometry like hair or grass.
+
+ A 'matrix' and 'vstep' associated with the \\em basis are used to
+ interpolate the vertices of a cubic BasisCurves. (The basis attribute
+ is unused for linear BasisCurves.)
+
+ A single prim may have many curves whose count is determined implicitly by
+ the length of the \\em curveVertexCounts vector. Each individual curve is
+ composed of one or more segments. Each segment is defined by four vertices
+ for cubic curves and two vertices for linear curves. See the next section
+ for more information on how to map curve vertex counts to segment counts.
+
+ \\section UsdGeomBasisCurves_Segment Segment Indexing
+ Interpolating a curve requires knowing how to decompose it into its
+ individual segments.
+
+ The segments of a cubic curve are determined by the vertex count,
+ the \\em wrap (periodicity), and the vstep of the basis. For linear
+ curves, the basis token is ignored and only the vertex count and
+ wrap are needed.
+
+ cubic basis | vstep
+ ------------- | ------
+ bezier | 3
+ catmullRom | 1
+ bspline | 1
+
+ The first segment of a cubic (nonperiodic) curve is always defined by its
+ first four points. The vstep is the increment used to determine what
+ vertex indices define the next segment. For a two segment (nonperiodic)
+ bspline basis curve (vstep = 1), the first segment will be defined by
+ interpolating vertices [0, 1, 2, 3] and the second segment will be defined
+ by [1, 2, 3, 4]. For a two segment bezier basis curve (vstep = 3), the
+ first segment will be defined by interpolating vertices [0, 1, 2, 3] and
+ the second segment will be defined by [3, 4, 5, 6]. If the vstep is not
+ one, then you must take special care to make sure that the number of cvs
+ properly divides by your vstep. (The indices described are relative to
+ the initial vertex index for a batched curve.)
+
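+    The decomposition above can be written as simple index arithmetic. A
+    minimal Python sketch (illustrative only; the helper name is not part
+    of this schema or the USD API):
+    \\code
+    def segment_vertex_indices(curve_vertex_count, vstep):
+        # Nonperiodic cubic curve: each segment uses four consecutive
+        # vertices, and successive segments start 'vstep' vertices apart.
+        num_segments = (curve_vertex_count - 4) // vstep + 1
+        return [tuple(range(s * vstep, s * vstep + 4))
+                for s in range(num_segments)]
+
+    segment_vertex_indices(5, 1)   # bspline: [(0, 1, 2, 3), (1, 2, 3, 4)]
+    segment_vertex_indices(7, 3)   # bezier:  [(0, 1, 2, 3), (3, 4, 5, 6)]
+    \\endcode
+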
+    For periodic curves, at least one of the curve's initial vertices is
+ repeated to close the curve. For cubic curves, the number of vertices
+ repeated is '4 - vstep'. For linear curves, only one vertex is repeated
+ to close the loop.
+
+ Pinned curves are a special case of nonperiodic curves that only affects
+ the behavior of cubic Bspline and Catmull-Rom curves. To evaluate or render
+ pinned curves, a client must effectively add 'phantom points' at the
+ beginning and end of every curve in a batch. These phantom points
+ are injected to ensure that the interpolated curve begins at P[0] and
+ ends at P[n-1].
+
+ For a curve with initial point P[0] and last point P[n-1], the phantom
+    points are defined as:
+ P[-1] = 2 * P[0] - P[1]
+ P[n] = 2 * P[n-1] - P[n-2]
+
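+    For example, the phantom points can be prepended and appended to a
+    pinned curve's points with a small Python sketch (illustrative only;
+    the helper name is hypothetical):
+    \\code
+    def unpack_pinned_curve(points):
+        # points: list of (x, y, z) tuples for one pinned cubic curve.
+        # Prepend P[-1] = 2 * P[0] - P[1], append P[n] = 2 * P[n-1] - P[n-2].
+        first = tuple(2 * a - b for a, b in zip(points[0], points[1]))
+        last = tuple(2 * a - b for a, b in zip(points[-1], points[-2]))
+        return [first] + list(points) + [last]
+    \\endcode
+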
+ Pinned cubic curves will (usually) have to be unpacked into the standard
+ nonperiodic representation before rendering. This unpacking can add some
+ additional overhead. However, using pinned curves reduces the amount of
+ data recorded in a scene and (more importantly) better records the
+ authors' intent for interchange.
+
+ \\note The additional phantom points mean that the minimum curve vertex
+ count for cubic bspline and catmullRom curves is 2.
+
+ Linear curve segments are defined by two vertices.
+ A two segment linear curve's first segment would be defined by
+ interpolating vertices [0, 1]. The second segment would be defined by
+ vertices [1, 2]. (Again, for a batched curve, indices are relative to
+ the initial vertex index.)
+
+ When validating curve topology, each renderable entry in the
+ curveVertexCounts vector must pass this check.
+
+    type    | wrap                        | validity
+ ------- | --------------------------- | ----------------
+ linear | nonperiodic | curveVertexCounts[i] > 2
+ linear | periodic | curveVertexCounts[i] > 3
+ cubic | nonperiodic | (curveVertexCounts[i] - 4) % vstep == 0
+ cubic | periodic | (curveVertexCounts[i]) % vstep == 0
+ cubic | pinned (catmullRom/bspline) | (curveVertexCounts[i] - 2) >= 0
+
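+    A Python sketch of this check (illustrative only; 'vstep' comes from
+    the basis table above):
+    \\code
+    def is_valid_curve(count, curve_type, wrap, vstep=None):
+        # Mirrors the validity table above; 'count' is curveVertexCounts[i].
+        if curve_type == "linear":
+            return count > 2 if wrap == "nonperiodic" else count > 3
+        if wrap == "nonperiodic":
+            return (count - 4) % vstep == 0
+        if wrap == "periodic":
+            return count % vstep == 0
+        return (count - 2) >= 0    # pinned (catmullRom/bspline)
+    \\endcode
+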
+ \\section UsdGeomBasisCurves_BasisMatrix Cubic Vertex Interpolation
+
+ \\image html USDCurveBasisMatrix.png width=750
+
+ \\section UsdGeomBasisCurves_Linear Linear Vertex Interpolation
+
+ Linear interpolation is always used on curves of type linear.
+    For a parameter 't' with domain [0, 1], the curve is defined by the
+    equation P0 * (1-t) + P1 * t; t = 0 describes the first point and
+    t = 1 describes the end point.
+
+ \\section UsdGeomBasisCurves_PrimvarInterpolation Primvar Interpolation
+
+ For cubic curves, primvar data can be either interpolated cubically between
+ vertices or linearly across segments. The corresponding token
+ for cubic interpolation is 'vertex' and for linear interpolation is
+ 'varying'. Per vertex data should be the same size as the number
+ of vertices in your curve. Segment varying data is dependent on the
+ wrap (periodicity) and number of segments in your curve. For linear curves,
+ varying and vertex data would be interpolated the same way. By convention
+ varying is the preferred interpolation because of the association of
+ varying with linear interpolation.
+
+ \\image html USDCurvePrimvars.png
+
+ To convert an entry in the curveVertexCounts vector into a segment count
+ for an individual curve, apply these rules. Sum up all the results in
+ order to compute how many total segments all curves have.
+
+ The following tables describe the expected segment count for the 'i'th
+ curve in a curve batch as well as the entire batch. Python syntax
+ like '[:]' (to describe all members of an array) and 'len(...)'
+ (to describe the length of an array) are used.
+
+ type | wrap | curve segment count | batch segment count
+ ------- | --------------------------- | -------------------------------------- | --------------------------
+ linear | nonperiodic | curveVertexCounts[i] - 1 | sum(curveVertexCounts[:]) - len(curveVertexCounts)
+ linear | periodic | curveVertexCounts[i] | sum(curveVertexCounts[:])
+ cubic | nonperiodic | (curveVertexCounts[i] - 4) / vstep + 1 | sum(curveVertexCounts[:] - 4) / vstep + len(curveVertexCounts)
+ cubic | periodic | curveVertexCounts[i] / vstep | sum(curveVertexCounts[:]) / vstep
+ cubic | pinned (catmullRom/bspline) | (curveVertexCounts[i] - 2) + 1 | sum(curveVertexCounts[:] - 2) + len(curveVertexCounts)
+
+    The following table describes the expected size of varying
+ (linearly interpolated) data, derived from the segment counts computed
+ above.
+
+ wrap | curve varying count | batch varying count
+ ------------------- | ---------------------------- | ------------------------------------------------
+ nonperiodic/pinned | segmentCounts[i] + 1 | sum(segmentCounts[:]) + len(curveVertexCounts)
+ periodic | segmentCounts[i] | sum(segmentCounts[:])
+
+ Both curve types additionally define 'constant' interpolation for the
+ entire prim and 'uniform' interpolation as per curve data.
+
+
+ \\note Take care when providing support for linearly interpolated data for
+ cubic curves. Its shape doesn't provide a one to one mapping with either
+ the number of curves (like 'uniform') or the number of vertices (like
+ 'vertex') and so it is often overlooked. This is the only primitive in
+ UsdGeom (as of this writing) where this is true. For meshes, while they
+ use different interpolation methods, 'varying' and 'vertex' are both
+ specified per point. It's common to assume that curves follow a similar
+ pattern and build in structures and language for per primitive, per
+ element, and per point data only to come upon these arrays that don't
+ quite fit into either of those categories. It is
+ also common to conflate 'varying' with being per segment data and use the
+ segmentCount rules table instead of its neighboring varying data table
+ rules. We suspect that this is because for the common case of
+ nonperiodic cubic curves, both the provided segment count and varying data
+ size formula end with '+ 1'. While debugging, users may look at the double
+ '+ 1' as a mistake and try to remove it. We take this time to enumerate
+ these issues because we've fallen into them before and hope that we save
+ others time in their own implementations.
+
+ As an example of deriving per curve segment and varying primvar data counts from
+ the wrap, type, basis, and curveVertexCount, the following table is provided.
+
+ wrap | type | basis | curveVertexCount | curveSegmentCount | varyingDataCount
+ ------------- | ------- | ------- | ----------------- | ------------------ | -------------------------
+ nonperiodic | linear | N/A | [2 3 2 5] | [1 2 1 4] | [2 3 2 5]
+ nonperiodic | cubic | bezier | [4 7 10 4 7] | [1 2 3 1 2] | [2 3 4 2 3]
+ nonperiodic | cubic | bspline | [5 4 6 7] | [2 1 3 4] | [3 2 4 5]
+ periodic | cubic | bezier | [6 9 6] | [2 3 2] | [2 3 2]
+ periodic | linear | N/A | [3 7] | [3 7] | [3 7]
+
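+    These rules translate directly into code. A minimal Python sketch
+    (illustrative helpers, not a USD API) that reproduces the rows above:
+    \\code
+    VSTEP = {"bezier": 3, "catmullRom": 1, "bspline": 1}
+
+    def segment_count(count, curve_type, wrap, basis=None):
+        if curve_type == "linear":
+            return count - 1 if wrap == "nonperiodic" else count
+        vstep = VSTEP[basis]
+        if wrap == "nonperiodic":
+            return (count - 4) // vstep + 1
+        if wrap == "periodic":
+            return count // vstep
+        return (count - 2) + 1    # pinned
+
+    def varying_count(segments, wrap):
+        return segments if wrap == "periodic" else segments + 1
+
+    # nonperiodic cubic bezier [4, 7, 10, 4, 7] -> segments [1, 2, 3, 1, 2]
+    [segment_count(c, "cubic", "nonperiodic", "bezier") for c in (4, 7, 10, 4, 7)]
+    \\endcode
+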
+ \\section UsdGeomBasisCurves_TubesAndRibbons Tubes and Ribbons
+
+ The strictest definition of a curve as an infinitely thin wire is not
+ particularly useful for describing production scenes. The additional
+ \\em widths and \\em normals attributes can be used to describe cylindrical
+ tubes and or flat oriented ribbons.
+
+ Curves with only widths defined are imaged as tubes with radius
+ 'width / 2'. Curves with both widths and normals are imaged as ribbons
+ oriented in the direction of the interpolated normal vectors.
+
+ While not technically UsdGeomPrimvars, widths and normals
+ also have interpolation metadata. It's common for authored widths to have
+ constant, varying, or vertex interpolation
+ (see UsdGeomCurves::GetWidthsInterpolation()). It's common for
+ authored normals to have varying interpolation
+ (see UsdGeomPointBased::GetNormalsInterpolation()).
+
+ \\image html USDCurveHydra.png
+
+ The file used to generate these curves can be found in
+ extras/usd/examples/usdGeomExamples/basisCurves.usda. It's provided
+ as a reference on how to properly image both tubes and ribbons. The first
+    row of curves is linear; the second is cubic bezier. (We aim in future
+ releases of HdSt to fix the discontinuity seen with broken tangents to
+ better match offline renderers like RenderMan.) The yellow and violet
+ cubic curves represent cubic vertex width interpolation for which there is
+ no equivalent for linear curves.
+
+    \\note How did this prim type get its name? It is a portmanteau of
+ two different statements in the original RenderMan specification:
+ 'Basis' and 'Curves'.
+"""
+) {
+ # interpolation attributes
+ uniform token type = "cubic" (
+ allowedTokens = ["linear", "cubic"]
+ doc = """Linear curves interpolate linearly between two vertices.
+ Cubic curves use a basis matrix with four vertices to interpolate a segment.""")
+
+ uniform token basis = "bezier" (
+ allowedTokens = ["bezier", "bspline", "catmullRom"]
+ doc = """The basis specifies the vstep and matrix used for cubic
+ interpolation. \\note The 'hermite' and 'power' tokens have been
+ removed. We've provided UsdGeomHermiteCurves
+ as an alternative for the 'hermite' basis.""")
+
+ uniform token wrap = "nonperiodic" (
+ allowedTokens = ["nonperiodic", "periodic", "pinned"]
+ doc = """If wrap is set to periodic, the curve when rendered will
+ repeat the initial vertices (dependent on the vstep) to close the
+ curve. If wrap is set to 'pinned', phantom points may be created
+ to ensure that the curve interpolation starts at P[0] and ends at P[n-1].
+ """)
+}
+
+class NurbsCurves "NurbsCurves" (
+    inherits = </Curves>
+ doc = """This schema is analagous to NURBS Curves in packages like Maya
+ and Houdini, often used for interchange of rigging and modeling curves.
+ Unlike Maya, this curve spec supports batching of multiple curves into a
+ single prim, widths, and normals in the schema. Additionally, we require
+    'numSegments + 2 * degree + 1' knots (2 more than Maya does). This is to
+ be more consistent with RenderMan's NURBS patch specification.
+
+    To express a periodic curve (see the sketch after these lists):
+      - knot[0] = knot[1] - (knots[-2] - knots[-3]);
+ - knot[-1] = knot[-2] + (knot[2] - knots[1]);
+
+ To express a nonperiodic curve:
+ - knot[0] = knot[1];
+ - knot[-1] = knot[-2];
+
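+    A Python sketch of deriving the two end knots from an interior
+    (Maya-style) knot vector, following the expressions above (the helper
+    name is hypothetical):
+    \\code
+    def add_end_knots(interior, periodic):
+        # interior: the knots Maya would author (knot[1] .. knot[-2]).
+        knots = [0.0] + list(interior) + [0.0]
+        if periodic:
+            knots[0] = knots[1] - (knots[-2] - knots[-3])
+            knots[-1] = knots[-2] + (knots[2] - knots[1])
+        else:
+            knots[0] = knots[1]
+            knots[-1] = knots[-2]
+        return knots
+    \\endcode
+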
+ In spite of these slight differences in the spec, curves generated in Maya
+ should be preserved when roundtripping.
+
+ \\em order and \\em range, when representing a batched NurbsCurve should be
+    authored one value per curve. \\em knots should be the concatenation of
+ all batched curves."""
+) {
+ # topology attributes
+ int[] order = [] (
+ doc = """Order of the curve. Order must be positive and is
+ equal to the degree of the polynomial basis to be evaluated, plus 1.
+ Its value for the 'i'th curve must be less than or equal to
+ curveVertexCount[i]""")
+
+ # interpolation attributes
+ double[] knots (
+ doc = """Knot vector providing curve parameterization.
+ The length of the slice of the array for the ith curve
+ must be ( curveVertexCount[i] + order[i] ), and its
+ entries must take on monotonically increasing values.""")
+
+ double2[] ranges (
+ doc = """Provides the minimum and maximum parametric values (as defined
+ by knots) over which the curve is actually defined. The minimum must
+ be less than the maximum, and greater than or equal to the value of the
+        knots['i'th curve slice][order[i]-1]. The maximum must be less
+ than or equal to the last element's value in knots['i'th curve slice].
+ Range maps to (vmin, vmax) in the RenderMan spec.""")
+
+ double[] pointWeights (
+ doc = """Optionally provides "w" components for each control point,
+ thus must be the same length as the points attribute. If authored,
+ the curve will be rational. If unauthored, the curve will be
+ polynomial, i.e. weight for all points is 1.0.
+ \\note Some DCC's pre-weight the \\em points, but in this schema,
+ \\em points are not pre-weighted."""
+ )
+}
+
+class Points "Points" (
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = true
+ }
+ }
+    inherits = </PointBased>
+ doc = """Points are analogous to the RiPoints spec.
+
+ Points can be an efficient means of storing and rendering particle
+ effects comprised of thousands or millions of small particles. Points
+ generally receive a single shading sample each, which should take
+ \\em normals into account, if present.
+
+ While not technically UsdGeomPrimvars, the widths and normals also
+ have interpolation metadata. It's common for authored widths and normals
+ to have constant or varying interpolation."""
+
+) {
+ # shaping attributes
+ float[] widths (
+ doc = """Widths are defined as the \\em diameter of the points, in
+ object space. 'widths' is not a generic Primvar, but
+ the number of elements in this attribute will be determined by
+ its 'interpolation'. See \\ref SetWidthsInterpolation() . If
+ 'widths' and 'primvars:widths' are both specified, the latter
+ has precedence."""
+ )
+
+ int64[] ids (
+ doc = """Ids are optional; if authored, the ids array should be the same
+ length as the points array, specifying (at each timesample if
+ point identities are changing) the id of each point. The
+ type is signed intentionally, so that clients can encode some
+ binary state on Id'd points without adding a separate
+ primvar."""
+ )
+}
+
+class PointInstancer "PointInstancer" (
+ doc = """Encodes vectorized instancing of multiple, potentially
+ animated, prototypes (object/instance masters), which can be arbitrary
+ prims/subtrees on a UsdStage.
+
+ PointInstancer is a "multi instancer", as it allows multiple prototypes
+ to be scattered among its "points". We use a UsdRelationship
+ \\em prototypes to identify and order all of the possible prototypes, by
+ targeting the root prim of each prototype. The ordering imparted by
+ relationships associates a zero-based integer with each prototype, and
+ it is these integers we use to identify the prototype of each instance,
+ compactly, and allowing prototypes to be swapped out without needing to
+ reauthor all of the per-instance data.
+
+ The PointInstancer schema is designed to scale to billions of instances,
+ which motivates the choice to split the per-instance transformation into
+ position, (quaternion) orientation, and scales, rather than a
+ 4x4 matrix per-instance. In addition to requiring fewer bytes even if
+ all elements are authored (32 bytes vs 64 for a single-precision 4x4
+ matrix), we can also be selective about which attributes need to animate
+ over time, for substantial data reduction in many cases.
+
+ Note that PointInstancer is \\em not a Gprim, since it is not a graphical
+ primitive by any stretch of the imagination. It \\em is, however,
+ Boundable, since we will sometimes want to treat the entire PointInstancer
+ similarly to a procedural, from the perspective of inclusion or framing.
+
+ \\section UsdGeomPointInstancer_varyingTopo Varying Instance Identity over Time
+
+ PointInstancers originating from simulations often have the characteristic
+ that points/instances are "born", move around for some time period, and then
+ die (or leave the area of interest). In such cases, billions of instances
+ may be birthed over time, while at any \\em specific time, only a much
+ smaller number are actually alive. To encode this situation efficiently,
+    the simulator may re-use indices in the instance arrays: when a particle
+ dies, its index will be taken over by a new particle that may be birthed in
+ a much different location. This presents challenges both for
+ identity-tracking, and for motion-blur.
+
+ We facilitate identity tracking by providing an optional, animatable
+ \\em ids attribute, that specifies the 64 bit integer ID of the particle
+ at each index, at each point in time. If the simulator keeps monotonically
+ increasing a particle-count each time a new particle is birthed, it will
+ serve perfectly as particle \\em ids.
+
+ We facilitate motion blur for varying-topology particle streams by
+ optionally allowing per-instance \\em velocities and \\em angularVelocities
+ to be authored. If instance transforms are requested at a time between
+ samples and either of the velocity attributes is authored, then we will
+ not attempt to interpolate samples of \\em positions or \\em orientations.
+ If not authored, and the bracketing samples have the same length, then we
+ will interpolate.
+
+ \\section UsdGeomPointInstancer_transform Computing an Instance Transform
+
+ Each instance's transformation is a combination of the SRT affine transform
+ described by its scale, orientation, and position, applied \\em after
+ (i.e. less locally than) the local to parent transformation computed at
+ the root of the prototype it is instancing.
+
+ If your processing of prototype geometry naturally takes into account the
+ transform of the prototype root, then this term can be omitted from the
+ computation of each instance transform, and this can be controlled when
+ computing instance transformation matrices using the
+ UsdGeomPointInstancer::PrototypeXformInclusion enumeration.
+
+ To understand the computation of the instance transform, in order to put
+ an instance of a PointInstancer into the space of the PointInstancer's
+    parent prim we do the following (see the sketch after this list):
+
+ 1. Apply (most locally) the authored local to parent transformation for
+ prototypes[protoIndices[i]]
+ 2. If *scales* is authored, next apply the scaling matrix from *scales[i]*
+ 3. If *orientations* is authored: **if *angularVelocities* is authored**,
+ first multiply *orientations[i]* by the unit quaternion derived by scaling
+ *angularVelocities[i]* by the \\ref UsdGeom_PITimeScaling "time differential"
+ from the left-bracketing timeSample for *orientation* to the requested
+ evaluation time *t*, storing the result in *R*, **else** assign *R*
+ directly from *orientations[i]*. Apply the rotation matrix derived
+ from *R*.
+ 4. Apply the translation derived from *positions[i]*. If *velocities* is
+ authored, apply the translation deriving from *velocities[i]* scaled by
+ the time differential from the left-bracketing timeSample for *positions*
+ to the requested evaluation time *t*.
+ 5. Least locally, apply the transformation authored on the PointInstancer
+ prim itself (or the UsdGeomImageable::ComputeLocalToWorldTransform() of the
+ PointInstancer to put the instance directly into world space)
+
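+    A simplified Python sketch of steps 1-5 for a single instance,
+    ignoring the velocity and angular-velocity terms and using the Gf
+    types from USD's Python bindings (the helper name is illustrative):
+    \\code
+    from pxr import Gf
+
+    def instance_xform(proto_xform, scale, orientation, position,
+                       instancer_xform):
+        # Row-vector convention: the most local factor (the prototype's
+        # own transform) is the left-most term in the product.
+        s = Gf.Matrix4d(1.0).SetScale(Gf.Vec3d(*scale))
+        r = Gf.Matrix4d(1.0).SetRotate(orientation)   # unit Gf.Quatd
+        t = Gf.Matrix4d(1.0).SetTranslate(Gf.Vec3d(*position))
+        return proto_xform * s * r * t * instancer_xform
+    \\endcode
+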
+ If neither *velocities* nor *angularVelocities* are authored, we fallback to
+ standard position and orientation computation logic (using linear
+ interpolation between timeSamples) as described by
+ \\ref UsdGeom_VelocityInterpolation .
+
+ \\anchor UsdGeom_PITimeScaling
+ Scaling Velocities for Interpolation
+
+ When computing time-differentials by which to apply velocity or
+ angularVelocity to positions or orientations, we must scale by
+ ( 1.0 / UsdStage::GetTimeCodesPerSecond() ), because velocities are recorded
+ in units/second, while we are interpolating in UsdTimeCode ordinates.
+
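+    For example, computing a position at a requested time from the
+    left-bracketing positions sample (a plain Python sketch; names are
+    illustrative):
+    \\code
+    def position_at_time(p_sample, velocity, t, t_sample,
+                         time_codes_per_second):
+        # Velocities are recorded in units per second; convert the
+        # UsdTimeCode difference to seconds before applying them.
+        dt = (t - t_sample) / time_codes_per_second
+        return tuple(p + v * dt for p, v in zip(p_sample, velocity))
+    \\endcode
+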
+ We provide both high and low-level API's for dealing with the
+ transformation as a matrix, both will compute the instance matrices using
+ multiple threads; the low-level API allows the client to cache unvarying
+ inputs so that they need not be read duplicately when computing over
+ time.
+
+ See also \\ref UsdGeom_VelocityInterpolation .
+
+ \\section UsdGeomPointInstancer_primvars Primvars on PointInstancer
+
+ \\ref UsdGeomPrimvar "Primvars" authored on a PointInstancer prim should
+ always be applied to each instance with \\em constant interpolation at
+ the root of the instance. When you are authoring primvars on a
+ PointInstancer, think about it as if you were authoring them on a
+ point-cloud (e.g. a UsdGeomPoints gprim). The same
+ interpolation rules for points apply here, substituting
+ "instance" for "point".
+
+ In other words, the (constant) value extracted for each instance
+ from the authored primvar value depends on the authored \\em interpolation
+    and \\em elementSize of the primvar, as follows (see the sketch after
+    this list):
+ \\li constant or uniform : the entire authored value of the
+ primvar should be applied exactly to each instance.
+ \\li varying, vertex, or faceVarying: the first
+ \\em elementSize elements of the authored primvar array should be assigned to
+ instance zero, the second \\em elementSize elements should be assigned to
+ instance one, and so forth.
+
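+    A Python sketch of that per-instance slicing (illustrative only):
+    \\code
+    def value_for_instance(primvar_values, element_size, interpolation, i):
+        # 'constant' and 'uniform': the whole authored value applies to
+        # every instance; otherwise take the i'th elementSize-sized slice.
+        if interpolation in ("constant", "uniform"):
+            return primvar_values
+        return primvar_values[i * element_size:(i + 1) * element_size]
+    \\endcode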
+
+ \\section UsdGeomPointInstancer_masking Masking Instances: "Deactivating" and Invising
+
+ Often a PointInstancer is created "upstream" in a graphics pipeline, and
+ the needs of "downstream" clients necessitate eliminating some of the
+ instances from further consideration. Accomplishing this pruning by
+ re-authoring all of the per-instance attributes is not very attractive,
+ since it may mean destructively editing a large quantity of data. We
+ therefore provide means of "masking" instances by ID, such that the
+ instance data is unmolested, but per-instance transform and primvar data
+ can be retrieved with the no-longer-desired instances eliminated from the
+ (smaller) arrays. PointInstancer allows two independent means of masking
+ instances by ID, each with different features that meet the needs of
+ various clients in a pipeline. Both pruning features' lists of ID's are
+ combined to produce the mask returned by ComputeMaskAtTime().
+
+ \\note If a PointInstancer has no authored \\em ids attribute, the masking
+ features will still be available, with the integers specifying element
+ position in the \\em protoIndices array rather than ID.
+
+ \\subsection UsdGeomPointInstancer_inactiveIds InactiveIds: List-edited, Unvarying Masking
+
+ The first masking feature encodes a list of IDs in a list-editable metadatum
+    called \\em inactiveIds, which, although it does not affect stage
+    population the way \\ref UsdPrim::SetActive() "prim activation" does,
+    shares with that feature that its application is uniform over all time.
+ Because it is list-editable, we can \\em sparsely add and remove instances
+ from it in many layers.
+
+ This sparse application pattern makes \\em inactiveIds a good choice when
+ further downstream clients may need to reverse masking decisions made
+ upstream, in a manner that is robust to many kinds of future changes to
+ the upstream data.
+
+ See ActivateId(), ActivateIds(), DeactivateId(), DeactivateIds(),
+ ActivateAllIds()
+
+ \\subsection UsdGeomPointInstancer_invisibleIds invisibleIds: Animatable Masking
+
+ The second masking feature encodes a list of IDs in a time-varying
+ Int64Array-valued UsdAttribute called \\em invisibleIds , since it shares
+ with \\ref UsdGeomImageable::GetVisibilityAttr() "Imageable visibility"
+ the ability to animate object visibility.
+
+ Unlike \\em inactiveIds, overriding a set of opinions for \\em invisibleIds
+    is not at all straightforward, because one will, in general, need to
+ reauthor (in the overriding layer) **all** timeSamples for the attribute
+ just to change one Id's visibility state, so it cannot be authored
+ sparsely. But it can be a very useful tool for situations like encoding
+ pre-computed camera-frustum culling of geometry when either or both of
+ the instances or the camera is animated.
+
+ See VisId(), VisIds(), InvisId(), InvisIds(), VisAllIds()
+
+ \\section UsdGeomPointInstancer_protoProcessing Processing and Not Processing Prototypes
+
+ Any prim in the scenegraph can be targeted as a prototype by the
+ \\em prototypes relationship. We do not, however, provide a specific
+ mechanism for identifying prototypes as geometry that should not be drawn
+ (or processed) in their own, local spaces in the scenegraph. We
+ encourage organizing all prototypes as children of the PointInstancer
+ prim that consumes them, and pruning "raw" processing and drawing
+ traversals when they encounter a PointInstancer prim; this is what the
+ UsdGeomBBoxCache and UsdImaging engines do.
+
+ There \\em is a pattern one can deploy for organizing the prototypes
+ such that they will automatically be skipped by basic UsdPrim::GetChildren()
+ or UsdPrimRange traversals. Usd prims each have a
+ \\ref Usd_PrimSpecifiers "specifier" of "def", "over", or "class". The
+ default traversals skip over prims that are "pure overs" or classes. So
+ to protect prototypes from all generic traversals and processing, place
+ them under a prim that is just an "over". For example,
+ \\code
+ 01 def PointInstancer "Crowd_Mid"
+ 02 {
+    03        rel prototypes = [ </Crowd_Mid/Prototypes/MaleThin_Business>, </Crowd_Mid/Prototypes/MaleThin_Casual> ]
+ 04
+ 05 over "Prototypes"
+ 06 {
+ 07 def "MaleThin_Business" (
+ 08 references = [@MaleGroupA/usd/MaleGroupA.usd@]
+ 09 variants = {
+ 10 string modelingVariant = "Thin"
+ 11 string costumeVariant = "BusinessAttire"
+ 12 }
+ 13 )
+ 14 { ... }
+ 15
+ 16 def "MaleThin_Casual"
+ 17 ...
+ 18 }
+ 19 }
+ \\endcode
+ """
+
+    inherits = </Boundable>
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = true
+ }
+ dictionary schemaTokens = {
+ dictionary inactiveIds = {
+ string doc = """int64listop prim metadata that specifies
+ the PointInstancer ids that should be masked (unrenderable)
+ over all time."""
+ }
+ }
+ }
+) {
+ rel prototypes (
+ doc = """Required property. Orders and targets the prototype root
+ prims, which can be located anywhere in the scenegraph that is convenient,
+ although we promote organizing prototypes as children of the
+ PointInstancer. The position of a prototype in this relationship defines
+ the value an instance would specify in the \\em protoIndices attribute to
+ instance that prototype. Since relationships are uniform, this property
+ cannot be animated."""
+ )
+
+ int[] protoIndices (
+ doc = """Required property. Per-instance index into
+ \\em prototypes relationship that identifies what geometry should be
+ drawn for each instance. Topology attribute - can be animated,
+ but at a potential performance impact for streaming."""
+ )
+
+ int64[] ids (
+ doc = """Ids are optional; if authored, the ids array should be the same
+ length as the \\em protoIndices array, specifying (at each timeSample if
+ instance identities are changing) the id of each instance. The
+ type is signed intentionally, so that clients can encode some
+ binary state on Id'd instances without adding a separate primvar.
+ See also \\ref UsdGeomPointInstancer_varyingTopo"""
+ )
+
+ point3f[] positions (
+ doc = """Required property. Per-instance position. See also
+ \\ref UsdGeomPointInstancer_transform ."""
+ )
+
+ quath[] orientations (
+ doc="""If authored, per-instance orientation of each instance about its
+ prototype's origin, represented as a unit length quaternion, which
+ allows us to encode it with sufficient precision in a compact GfQuath.
+
+        It is the client's responsibility to ensure that authored quaternions are
+ unit length; the convenience API below for authoring orientations from
+ rotation matrices will ensure that quaternions are unit length, though
+ it will not make any attempt to select the "better (for interpolation
+ with respect to neighboring samples)" of the two possible quaternions
+ that encode the rotation.
+
+ See also \\ref UsdGeomPointInstancer_transform .""" )
+
+ quatf[] orientationsf (
+ doc="""If authored, per-instance orientation of each instance about its
+ prototype's origin, represented as a unit length quaternion, encoded
+ as a GfQuatf to support higher precision computations.
+
+        It is the client's responsibility to ensure that authored quaternions are
+ unit length; the convenience API below for authoring orientations from
+ rotation matrices will ensure that quaternions are unit length, though
+ it will not make any attempt to select the "better (for interpolation
+ with respect to neighboring samples)" of the two possible quaternions
+ that encode the rotation. Note that if the earliest time sample (or
+        default value if there are no time samples) of orientationsf is not empty,
+ orientationsf will be preferred over orientations if both are authored.
+
+ See also \\ref UsdGeomPointInstancer_transform .""" )
+
+ float3[] scales (
+ doc="""If authored, per-instance scale to be applied to
+ each instance, before any rotation is applied.
+
+ See also \\ref UsdGeomPointInstancer_transform ."""
+ )
+
+ vector3f[] velocities (
+ doc = """If provided, per-instance 'velocities' will be used to
+ compute positions between samples for the 'positions' attribute,
+ rather than interpolating between neighboring 'positions' samples.
+ Velocities should be considered mandatory if both \\em protoIndices
+ and \\em positions are animated. Velocity is measured in position
+ units per second, as per most simulation software. To convert to
+ position units per UsdTimeCode, divide by
+ UsdStage::GetTimeCodesPerSecond().
+
+ See also \\ref UsdGeomPointInstancer_transform,
+ \\ref UsdGeom_VelocityInterpolation ."""
+ )
+
+ vector3f[] accelerations (
+ doc = """If authored, per-instance 'accelerations' will be used with
+ velocities to compute positions between samples for the 'positions'
+ attribute rather than interpolating between neighboring 'positions'
+ samples. Acceleration is measured in position units per second-squared.
+ To convert to position units per squared UsdTimeCode, divide by the
+ square of UsdStage::GetTimeCodesPerSecond()."""
+ )
+
+ vector3f[] angularVelocities (
+ doc="""If authored, per-instance angular velocity vector to be used for
+        interpolating orientations. Angular velocities should be considered
+ mandatory if both \\em protoIndices and \\em orientations are animated.
+ Angular velocity is measured in degrees per second. To convert
+ to degrees per UsdTimeCode, divide by
+ UsdStage::GetTimeCodesPerSecond().
+
+ See also \\ref UsdGeomPointInstancer_transform ."""
+ )
+
+ int64[] invisibleIds = [] (
+ doc="""A list of id's to make invisible at the evaluation time.
+ See \\ref UsdGeomPointInstancer_invisibleIds ."""
+ )
+}
+
+
+class Camera "Camera" (
+ doc = """Transformable camera.
+
+ Describes optical properties of a camera via a common set of attributes
+ that provide control over the camera's frustum as well as its depth of
+ field. For stereo, the left and right camera are individual prims tagged
+ through the \\ref UsdGeomCamera::GetStereoRoleAttr() "stereoRole attribute".
+
+ There is a corresponding class GfCamera, which can hold the state of a
+ camera (at a particular time). \\ref UsdGeomCamera::GetCamera() and
+ \\ref UsdGeomCamera::SetFromCamera() convert between a USD camera prim and
+ a GfCamera.
+
+ To obtain the camera's location in world space, call the following on a
+ UsdGeomCamera 'camera':
+ \\code
+ GfMatrix4d camXform = camera.ComputeLocalToWorldTransform(time);
+ \\endcode
+ \\note
+ Cameras in USD are always "Y up", regardless of the stage's orientation
+ (i.e. UsdGeomGetStageUpAxis()). This means that the inverse of
+ 'camXform' (the VIEW half of the MODELVIEW transform in OpenGL parlance)
+ will transform the world such that the camera is at the origin, looking
+ down the -Z axis, with +Y as the up axis, and +X pointing to the right.
+ This describes a __right handed coordinate system__.
+
+ \\section UsdGeom_CameraUnits Units of Measure for Camera Properties
+
+ Despite the familiarity of millimeters for specifying some physical
+ camera properties, UsdGeomCamera opts for greater consistency with all
+ other UsdGeom schemas, which measure geometric properties in scene units,
+ as determined by UsdGeomGetStageMetersPerUnit(). We do make a
+ concession, however, in that lens and filmback properties are measured in
+ __tenths of a scene unit__ rather than "raw" scene units. This means
+ that with the fallback value of .01 for _metersPerUnit_ - i.e. scene unit
+ of centimeters - then these "tenth of scene unit" properties are
+ effectively millimeters.
+
+ \\note If one adds a Camera prim to a UsdStage whose scene unit is not
+ centimeters, the fallback values for filmback properties will be
+ incorrect (or at the least, unexpected) in an absolute sense; however,
+ proper imaging through a "default camera" with focusing disabled depends
+ only on ratios of the other properties, so the camera is still usable.
+ However, it follows that if even one property is authored in the correct
+ scene units, then they all must be.
+
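+    For example, converting a lens or filmback property from tenths of a
+    scene unit to millimeters (plain Python sketch; the helper name is
+    illustrative):
+    \\code
+    def tenths_of_unit_to_mm(value, meters_per_unit):
+        # One scene unit is meters_per_unit meters, i.e.
+        # meters_per_unit * 1000 millimeters.
+        return value * 0.1 * meters_per_unit * 1000.0
+
+    # With the fallback metersPerUnit of 0.01 (centimeters), the default
+    # focalLength of 50 is exactly 50 mm.
+    tenths_of_unit_to_mm(50.0, 0.01)   # 50.0
+    \\endcode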
+
+ \\sa \\ref UsdGeom_LinAlgBasics
+ """
+    inherits = </Xformable>
+ customData = {
+ string extraIncludes = """
+#include "pxr/base/gf/camera.h" """
+ }
+) {
+ # viewing frustum
+ token projection = "perspective" (
+ allowedTokens = ["perspective", "orthographic"])
+ float horizontalAperture = 20.9550 (
+ doc = """Horizontal aperture in tenths of a scene unit; see
+ \\ref UsdGeom_CameraUnits . Default is the equivalent of
+ the standard 35mm spherical projector aperture.""")
+ float verticalAperture = 15.2908 (
+ doc = """Vertical aperture in tenths of a scene unit; see
+ \\ref UsdGeom_CameraUnits . Default is the equivalent of
+ the standard 35mm spherical projector aperture.""")
+ float horizontalApertureOffset = 0.0 (
+ doc = """Horizontal aperture offset in the same units as
+ horizontalAperture. Defaults to 0.""")
+ float verticalApertureOffset = 0.0 (
+ doc = """Vertical aperture offset in the same units as
+ verticalAperture. Defaults to 0.""")
+ float focalLength = 50.0 (
+ doc = """Perspective focal length in tenths of a scene unit; see
+ \\ref UsdGeom_CameraUnits .""")
+ float2 clippingRange = (1, 1000000) (
+ doc = """Near and far clipping distances in scene units; see
+ \\ref UsdGeom_CameraUnits .""")
+ float4[] clippingPlanes = [] (
+ doc = """Additional, arbitrarily oriented clipping planes.
+ A vector (a,b,c,d) encodes a clipping plane that cuts off
+ (x,y,z) with a * x + b * y + c * z + d * 1 < 0 where (x,y,z)
+ are the coordinates in the camera's space.""")
+
+ # depth of field
+ float fStop = 0.0 (
+ doc = """Lens aperture. Defaults to 0.0, which turns off focusing.""")
+ float focusDistance = 0.0 (
+ doc = """Distance from the camera to the focus plane in scene units; see
+ \\ref UsdGeom_CameraUnits .""")
+
+ # stereoscopic 3D
+ uniform token stereoRole = "mono" (
+ allowedTokens = ["mono", "left", "right"]
+ doc = """If different from mono, the camera is intended to be the left
+ or right camera of a stereo setup.""")
+
+ # Parameters for motion blur
+ double shutter:open = 0.0 (
+ doc = """Frame relative shutter open time in UsdTimeCode units (negative
+ value indicates that the shutter opens before the current
+ frame time). Used for motion blur."""
+ )
+ double shutter:close = 0.0 (
+ doc = """Frame relative shutter close time, analogous comments from
+ shutter:open apply. A value greater or equal to shutter:open
+ should be authored, otherwise there is no exposure and a
+ renderer should produce a black image."""
+ )
+
+ # exposure adjustment
+ float exposure = 0.0 (
+ doc = """Exposure adjustment, as a log base-2 value. The default
+ of 0.0 has no effect. A value of 1.0 will double the
+ image-plane intensities in a rendered image; a value of
+ -1.0 will halve them."""
+ )
+}
+
+class "GeomModelAPI"
+(
+    inherits = </APISchemaBase>
+ customData = {
+ string className = "ModelAPI"
+ string extraIncludes = """
+#include "pxr/usd/usdGeom/bboxCache.h"
+#include "pxr/usd/usdGeom/constraintTarget.h"
+#include "pxr/usd/usdGeom/imageable.h" """
+ dictionary schemaTokens = {
+ dictionary extentsHint = {
+ string doc = """Name of the attribute used to author extents
+ hints at the root of leaf models. Extents hints are stored by purpose
+ as a vector of GfVec3f values. They are ordered based on the order
+ of purpose tokens returned by
+ UsdGeomImageable::GetOrderedPurposeTokens."""
+ }
+ }
+ }
+ doc = """UsdGeomModelAPI extends the generic UsdModelAPI schema with
+ geometry specific concepts such as cached extents for the entire model,
+ constraint targets, and geometry-inspired extensions to the payload
+ lofting process.
+
+ As described in GetExtentsHint() below, it is useful to cache extents
+ at the model level. UsdGeomModelAPI provides schema for computing and
+ storing these cached extents, which can be consumed by UsdGeomBBoxCache to
+ provide fast access to precomputed extents that will be used as the model's
+ bounds ( see UsdGeomBBoxCache::UsdGeomBBoxCache() ).
+
+ \\section UsdGeomModelAPI_drawMode Draw Modes
+
+ Draw modes provide optional alternate imaging behavior for USD subtrees with
+ kind model. \\em model:drawMode (which is inheritable) and
+ \\em model:applyDrawMode (which is not) are resolved into a decision to stop
+ traversing the scene graph at a certain point, and replace a USD subtree
+ with proxy geometry.
+
+ The value of \\em model:drawMode determines the type of proxy geometry:
+ - \\em origin - Draw the model-space basis vectors of the replaced prim.
+ - \\em bounds - Draw the model-space bounding box of the replaced prim.
+ - \\em cards - Draw textured quads as a placeholder for the replaced prim.
+ - \\em default - An explicit opinion to draw the USD subtree as normal.
+ - \\em inherited - Defer to the parent opinion.
+
+ \\em model:drawMode falls back to _inherited_ so that a whole scene,
+ a large group, or all prototypes of a model hierarchy PointInstancer can
+ be assigned a draw mode with a single attribute edit. If no draw mode is
+ explicitly set in a hierarchy, the resolved value is _default_.
+
+ \\em model:applyDrawMode is meant to be written when an asset is authored,
+ and provides flexibility for different asset types. For example,
+ a character assembly (composed of character, clothes, etc) might have
+ \\em model:applyDrawMode set at the top of the subtree so the whole group
+ can be drawn as a single card object. An effects subtree might have
+ \\em model:applyDrawMode set at a lower level so each particle
+ group draws individually.
+
+ Models of kind component are automatically treated as if
+ \\em model:applyDrawMode were true if \\em model:applyDrawMode is not
+    authored on the component prim. A component prim will be drawn with a
+ simplified representation when the prim has kind component,
+ \\em model:applyDrawMode is not authored (or authored to be true), and the
+ resolved (i.e. inherited down namespace) value for \\em model:drawMode is
+ not _default_. If you don't want component prims to use the resolved
+ non-default drawMode, you must apply the UsdGeomModelAPI schema on the prim
+ and explicitly set \\em model:applyDrawMode to false.
+
+ \\section UsdGeomModelAPI_cardGeometry Cards Geometry
+
+ The specific geometry used in cards mode is controlled by the
+ \\em model:cardGeometry attribute:
+ - \\em cross - Generate a quad normal to each basis direction and negative.
+ Locate each quad so that it bisects the model extents.
+ - \\em box - Generate a quad normal to each basis direction and negative.
+ Locate each quad on a face of the model extents, facing out.
+ - \\em fromTexture - Generate a quad for each supplied texture from
+ attributes stored in that texture's metadata.
+
+ For \\em cross and \\em box mode, the extents are calculated for purposes
+ \\em default, \\em proxy, and \\em render, at their earliest authored time.
+ If the model has no textures, all six card faces are rendered using
+ \\em model:drawModeColor. If one or more textures are present, only axes
+ with one or more textures assigned are drawn. For each axis, if both
+ textures (positive and negative) are specified, they'll be used on the
+ corresponding card faces; if only one texture is specified, it will be
+ mapped to the opposite card face after being flipped on the texture's
+ s-axis. Any card faces with invalid asset paths will be drawn with
+ \\em model:drawModeColor.
+
+ Both \\em model:cardGeometry and \\em model:drawModeColor should be
+ authored on the prim where the draw mode takes effect, since these
+ attributes are not inherited.
+
+ For \\em fromTexture mode, only card faces with valid textures assigned
+ are drawn. The geometry is generated by pulling the \\em worldtoscreen
+ attribute out of texture metadata. This is expected to be a 4x4 matrix
+ mapping the model-space position of the card quad to the clip-space quad
+ with corners (-1,-1,0) and (1,1,0). The card vertices are generated by
+ transforming the clip-space corners by the inverse of \\em worldtoscreen.
+ Textures are mapped so that (s) and (t) map to (+x) and (+y) in clip space.
+ If the metadata cannot be read in the right format, or the matrix can't
+ be inverted, the card face is not drawn.
+
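+    A sketch of that vertex generation in Python with numpy, assuming a
+    row-vector convention like GfMatrix4d (implementations may differ):
+    \\code
+    import numpy as np
+
+    def card_vertices(world_to_screen):
+        # Clip-space corners of the quad spanning (-1,-1,0) .. (1,1,0),
+        # mapped back to model space by the inverse of worldtoscreen.
+        inv = np.linalg.inv(np.asarray(world_to_screen, dtype=float))
+        verts = []
+        for x, y in [(-1, -1), (1, -1), (1, 1), (-1, 1)]:
+            p = np.array([x, y, 0.0, 1.0]) @ inv
+            verts.append(tuple(p[:3] / p[3]))
+        return verts
+    \\endcode
+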
+ All card faces are drawn and textured as single-sided.
+
+ \\todo CreatePayload() """
+)
+{
+ uniform token model:drawMode = "inherited" (
+ allowedTokens = ["origin", "bounds", "cards", "default", "inherited"]
+ doc = """Alternate imaging mode; applied to this prim or child prims
+ where \\em model:applyDrawMode is true, or where the prim
+ has kind \\em component and \\em model:applyDrawMode is not
+ authored. See \\ref UsdGeomModelAPI_drawMode
+ for mode descriptions."""
+ )
+ uniform bool model:applyDrawMode = false (
+ doc = """If true, and the resolved value of \\em model:drawMode is
+ non-default, apply an alternate imaging mode to this prim. See
+ \\ref UsdGeomModelAPI_drawMode."""
+ )
+ uniform float3 model:drawModeColor = (0.18, 0.18, 0.18) (
+ doc = """The base color of imaging prims inserted for alternate
+ imaging modes. For \\em origin and \\em bounds modes, this
+ controls line color; for \\em cards mode, this controls the
+ fallback quad color."""
+ )
+ uniform token model:cardGeometry = "cross" (
+ allowedTokens = ["cross", "box", "fromTexture"]
+ doc = """The geometry to generate for imaging prims inserted for \\em
+ cards imaging mode. See \\ref UsdGeomModelAPI_cardGeometry for
+ geometry descriptions."""
+ )
+ asset model:cardTextureXPos (
+ doc = """In \\em cards imaging mode, the texture applied to the X+ quad.
+ The texture axes (s,t) are mapped to model-space axes (-y, -z)."""
+ )
+ asset model:cardTextureYPos (
+ doc = """In \\em cards imaging mode, the texture applied to the Y+ quad.
+ The texture axes (s,t) are mapped to model-space axes (x, -z)."""
+ )
+ asset model:cardTextureZPos (
+ doc = """In \\em cards imaging mode, the texture applied to the Z+ quad.
+ The texture axes (s,t) are mapped to model-space axes (x, -y)."""
+ )
+ asset model:cardTextureXNeg (
+ doc = """In \\em cards imaging mode, the texture applied to the X- quad.
+ The texture axes (s,t) are mapped to model-space axes (y, -z)."""
+ )
+ asset model:cardTextureYNeg (
+ doc = """In \\em cards imaging mode, the texture applied to the Y- quad.
+ The texture axes (s,t) are mapped to model-space axes (-x, -z)."""
+ )
+ asset model:cardTextureZNeg (
+ doc = """In \\em cards imaging mode, the texture applied to the Z- quad.
+ The texture axes (s,t) are mapped to model-space axes (-x, -y)."""
+ )
+}
+
+class "MotionAPI"
+(
+    inherits = </APISchemaBase>
+ doc = """UsdGeomMotionAPI encodes data that can live on any prim that
+ may affect computations involving:
+ - computed motion for motion blur
+ - sampling for motion blur
+
+ The \\ref GetMotionBlurScaleAttr() "motion:blurScale" attribute allows
+ artists to scale the __amount__ of motion blur to be rendered for parts
+ of the scene without changing the recorded animation. See
+ \\ref UsdGeomMotionAPI_blurScale for use and implementation details.
+
+ """
+)
+{
+ float motion:blurScale = 1.0 (
+ doc = """BlurScale is an __inherited__ float attribute that stipulates
+ the rendered motion blur (as typically specified via UsdGeomCamera's
+ _shutter:open_ and _shutter:close_ properties) should be scaled for
+ __all objects__ at and beneath the prim in namespace on which the
+ _motion:blurScale_ value is specified.
+
+ Without changing any other data in the scene, _blurScale_ allows artists to
+ "dial in" the amount of blur on a per-object basis. A _blurScale_
+ value of zero removes all blur, a value of 0.5 reduces blur by half,
+ and a value of 2.0 doubles the blur. The legal range for _blurScale_
+ is [0, inf), although very high values may result in extremely expensive
+ renders, and may exceed the capabilities of some renderers.
+
+ Although renderers are free to implement this feature however they see
+ fit, see \\ref UsdGeomMotionAPI_blurScale for our guidance on implementing
+ the feature universally and efficiently.
+
+ \\sa ComputeMotionBlurScale()
+ """
+ )
+
+ float motion:velocityScale = 1.0 (
+ customData = {
+ string apiName = "velocityScale"
+ }
+ doc = """\\deprecated
+
+ VelocityScale is an **inherited** float attribute that
+ velocity-based schemas (e.g. PointBased, PointInstancer) can consume
+ to compute interpolated positions and orientations by applying
+ velocity and angularVelocity, which is required for interpolating
+ between samples when topology is varying over time. Although these
+ quantities are generally physically computed by a simulator, sometimes
+ we require more or less motion-blur to achieve the desired look.
+ VelocityScale allows artists to dial-in, as a post-sim correction,
+ a scale factor to be applied to the velocity prior to computing
+ interpolated positions from it."""
+ )
+
+ int motion:nonlinearSampleCount = 3 (
+ customData = {
+ string apiName = "nonlinearSampleCount"
+ }
+ doc = """Determines the number of position or transformation samples
+ created when motion is described by attributes contributing non-linear
+ terms.
+
+ To give an example, imagine an application (such as a
+ renderer) consuming 'points' and the USD document also
+ contains 'accelerations' for the same prim. Unless the
+ application can consume these 'accelerations' itself, an
+ intermediate layer has to compute samples within the sampling
+ interval for the point positions based on the value of
+ 'points', 'velocities' and 'accelerations'. The number of these
+ samples is given by 'nonlinearSampleCount'. The samples are
+ equally spaced within the sampling interval.
+
+ Another example involves the PointInstancer where
+ 'nonlinearSampleCount' is relevant when 'angularVelocities'
+ or 'accelerations' are authored.
+
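+        As an illustration only, such intermediate samples could be
+        generated as below (plain Python; the quadratic expansion
+        p + v*dt + 0.5*a*dt^2 and the inclusion of both interval endpoints
+        are assumptions about the consuming application, not something
+        this schema mandates):
+        \\code
+        def nonlinear_samples(p, v, a, interval, count, time_codes_per_second):
+            # 'count' (>= 2) equally spaced samples across 'interval' timeCodes.
+            samples = []
+            for k in range(count):
+                dt = interval * k / (count - 1) / time_codes_per_second
+                samples.append(tuple(pi + vi * dt + 0.5 * ai * dt * dt
+                                     for pi, vi, ai in zip(p, v, a)))
+            return samples
+        \\endcode
+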
+ 'nonlinearSampleCount' is an **inherited** attribute, also
+ see ComputeNonlinearSampleCount()"""
+ )
+}
+
+class "XformCommonAPI"
+(
+    inherits = </APISchemaBase>
+ doc = """This class provides API for authoring and retrieving a standard set
+ of component transformations which include a scale, a rotation, a
+ scale-rotate pivot and a translation. The goal of the API is to enhance
+ component-wise interchange. It achieves this by limiting the set of allowed
+ basic ops and by specifying the order in which they are applied. In addition
+ to the basic set of ops, the 'resetXformStack' bit can also be set to
+ indicate whether the underlying xformable resets the parent transformation
+    (i.e. does not inherit its parent's transformation).
+
+ \\sa UsdGeomXformCommonAPI::GetResetXformStack()
+ \\sa UsdGeomXformCommonAPI::SetResetXformStack()
+
+ The operator-bool for the class will inform you whether an existing
+ xformable is compatible with this API.
+
+ The scale-rotate pivot is represented by a pair of (translate,
+ inverse-translate) xformOps around the scale and rotate operations.
+ The rotation operation can be any of the six allowed Euler angle sets.
+ \\sa UsdGeomXformOp::Type.
+
+ The xformOpOrder of an xformable that has all of the supported basic ops
+ is as follows:
+ ["xformOp:translate", "xformOp:translate:pivot", "xformOp:rotateXYZ",
+ "xformOp:scale", "!invert!xformOp:translate:pivot"].
+
+ It is worth noting that all of the ops are optional. For example, an
+ xformable may have only a translate or a rotate. It would still be
+ considered as compatible with this API. Individual SetTranslate(),
+ SetRotate(), SetScale() and SetPivot() methods are provided by this API
+ to allow such sparse authoring."""
+ customData = {
+ string apiSchemaType = "nonApplied"
+ string extraIncludes = """
+#include "pxr/usd/usdGeom/xformable.h"
+#include "pxr/usd/usdGeom/xformOp.h" """
+ dictionary schemaTokens = {
+ dictionary pivot = {
+ string doc = """Op suffix for the standard scale-rotate pivot
+ on a UsdGeomXformCommonAPI-compatible prim.
+ """
+ }
+ }
+ }
+)
+{
+}
+
+class HermiteCurves "HermiteCurves" (
+    inherits = </Curves>
+ doc = """This schema specifies a cubic hermite interpolated curve batch as
+ sometimes used for defining guides for animation. While hermite curves can
+ be useful because they interpolate through their control points, they are
+ not well supported by high-end renderers for imaging. Therefore, while we
+ include this schema for interchange, we strongly recommend the use of
+ UsdGeomBasisCurves as the representation of curves intended to be rendered
+    (e.g. hair or grass). Hermite curves can be converted to a Bezier
+ representation (though not from Bezier back to Hermite in general).
+
+ \\section UsdGeomHermiteCurves_Interpolation Point Interpolation
+
+ The initial cubic curve segment is defined by the first two points and
+ first two tangents. Additional segments are defined by additional
+ point / tangent pairs. The number of segments for each non-batched hermite
+ curve would be len(curve.points) - 1. The total number of segments
+ for the batched UsdGeomHermiteCurves representation is
+ len(points) - len(curveVertexCounts).
+
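+    In code (plain Python, illustrative only):
+    \\code
+    def hermite_segment_counts(curve_vertex_counts):
+        # Per-curve segment counts and the batched total described above.
+        per_curve = [c - 1 for c in curve_vertex_counts]
+        total = sum(curve_vertex_counts) - len(curve_vertex_counts)
+        return per_curve, total
+    \\endcode
+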
+ \\section UsdGeomHermiteCurves_Primvars Primvar, Width, and Normal Interpolation
+
+ Primvar interpolation is not well specified for this type as it is not
+ intended as a rendering representation. We suggest that per point
+ primvars would be linearly interpolated across each segment and should
+ be tagged as 'varying'.
+
+ It is not immediately clear how to specify cubic or 'vertex' interpolation
+ for this type, as we lack a specification for primvar tangents. This
+ also means that width and normal interpolation should be restricted to
+ varying (linear), uniform (per curve element), or constant (per prim).
+ """
+) {
+ vector3f[] tangents = [] (
+ doc = """Defines the outgoing trajectory tangent for each point.
+ Tangents should be the same size as the points attribute.""")
+}
+#usda 1.0
+(
+ "WARNING: THIS FILE IS GENERATED BY usdGenSchema. DO NOT EDIT."
+)
+
+class "HydraGenerativeProceduralAPI" (
+ doc = """
+ This API extends and configures the core UsdProcGenerativeProcedural schema
+ defined within usdProc for use with hydra generative procedurals as defined
+ within hdGp.
+ """
+)
+{
+ token primvars:hdGp:proceduralType (
+ doc = """The registered name of a HdGpGenerativeProceduralPlugin to
+ be executed."""
+ )
+ token proceduralSystem = "hydraGenerativeProcedural" (
+ doc = '''
+ This value should correspond to a configured instance of
+ HdGpGenerativeProceduralResolvingSceneIndex which will evaluate the
+ procedural. The default value of "hydraGenerativeProcedural" matches
+ the equivalent default of HdGpGenerativeProceduralResolvingSceneIndex.
+ Multiple instances of the scene index can be used to determine where
+ within a scene index chain a given procedural will be evaluated.
+ '''
+ )
+}
+
+# Portions of this file auto-generated by usdGenSchema.
+# Edits will survive regeneration except for comments and
+# changes to types with autoGenerated=true.
+{
+ "Plugins": [
+ {
+ "Info": {
+ "Types": {
+ "UsdHydraDiscoveryPlugin": {
+ "bases": [
+ "NdrDiscoveryPlugin"
+ ],
+ "displayName": "Discovery plugin for deprecated hydra shaders."
+ },
+ "UsdHydraGenerativeProceduralAPI": {
+ "alias": {
+ "UsdSchemaBase": "HydraGenerativeProceduralAPI"
+ },
+ "apiSchemaCanOnlyApplyTo": [
+ "GenerativeProcedural"
+ ],
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "singleApplyAPI"
+ }
+ }
+ },
+ "LibraryPath": "",
+ "Name": "usdHydra",
+ "ResourcePath": "resources",
+ "Root": "..",
+ "Type": "library"
+ }
+ ]
+}
+-- glslfx version 0.1
+
+//
+// Copyright 2018 Pixar
+//
+// Licensed under the Apache License, Version 2.0 (the "Apache License")
+// with the following modification; you may not use this file except in
+// compliance with the Apache License and the following modification to it:
+// Section 6. Trademarks. is deleted and replaced with:
+//
+// 6. Trademarks. This License does not grant permission to use the trade
+// names, trademarks, service marks, or product names of the Licensor
+// and its affiliates, except as required to comply with Section 4(c) of
+// the License and to reproduce the content of the NOTICE file.
+//
+// You may obtain a copy of the Apache License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the Apache License with the above modification is
+// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the Apache License for the specific
+// language governing permissions and limitations under the Apache License.
+//
+
+-- configuration
+{
+ "techniques": {
+ "default": {
+ }
+ }
+}
+
+-- This file is intentionally empty.
+#usda 1.0
+
+def Shader "HwPtexTexture_1" (
+ sdrMetadata = {
+ token role = "texture"
+ token isPtex = "1"
+ }
+)
+{
+ uniform token info:id = "HwPtexTexture_1"
+ uniform token info:implementationSource = "sourceAsset"
+
+ # Add a dummy sourceAsset attribute with sourceType="glslfx", so that
+ # an entry gets created for this deprecated shader in the registry.
+ uniform asset info:glslfx:sourceAsset = @./empty.glslfx@
+
+ token inputs:faceIndexPrimvar = "ptexFaceIndex" (
+ sdrMetadata = {
+ token primvarProperty = "1"
+ }
+ )
+
+ token inputs:faceOffsetPrimvar = "ptexFaceOffset" (
+ sdrMetadata = {
+ token primvarProperty = "1"
+ }
+ )
+
+ asset inputs:file = @@ (
+ doc = "Asset path to the file containg the image data."
+ )
+
+ float inputs:frame (
+ doc = "The frame offset for animated textures."
+ )
+
+ float inputs:textureMemory (
+ doc = """Amount of memory used to store the texture (in Mb). A value of
+ zero specifies the native resolution."""
+ )
+
+ int inputs:faceIndex (
+ doc = """The Ptex face index from which to sample. By default, this
+ parameter will be bound to the ptexFaceIndex primvar."""
+ )
+
+ int inputs:faceOffset (
+ doc = """The Ptex face offset to be applied to the face index. By
+ default, this parameter will be bound to the ptexFaceOffset primvar."""
+ )
+
+ float outputs:r (
+ doc = "Outputs the red channel."
+ sdrMetadata = {
+ token swizzle = "x"
+ }
+ )
+
+ float outputs:g (
+ doc = "Outputs the green channel."
+ sdrMetadata = {
+ token swizzle = "y"
+ }
+ )
+
+ float outputs:b (
+ doc = "Outputs the blue channnel."
+ sdrMetadata = {
+ token swizzle = "z"
+ }
+ )
+
+ float outputs:a (
+ doc = "Outputs the alpha channnel."
+ sdrMetadata = {
+ token swizzle = "w"
+ }
+ )
+
+ float3 outputs:rgb (
+ doc = "Outputs the red, green and blue channels."
+ sdrMetadata = {
+ token swizzle = "xyz"
+ }
+ )
+
+ float4 outputs:rgba (
+ doc = "Outputs all 4 channels (red, green, blue and alpha)."
+ sdrMetadata = {
+ token swizzle = "xyzw"
+ }
+ )
+}
+
+def Shader "HwUvTexture_1" (
+ sdrMetadata = {
+ token role = "texture"
+ token primvars = "uv"
+ }
+)
+{
+ uniform token info:id = "HwUvTexture_1"
+ uniform token info:implementationSource = "sourceAsset"
+
+ # Add a dummy sourceAsset attribute with sourceType="glslfx", so that
+ # an entry gets created for this deprecated shader in the registry.
+ uniform asset info:glslfx:sourceAsset = @./empty.glslfx@
+
+ asset inputs:file = @@ (
+ doc = "Asset path to the file containg the image data."
+ )
+
+ float inputs:frame (
+ doc = "The frame offset for animated textures."
+ )
+
+ float inputs:textureMemory (
+ doc = """Amount of memory used to store the texture (in Mb). A value of
+ zero specifies the native resolution."""
+ )
+
+ float2 inputs:uv (
+ doc = "The uv coordinates at which to sample the texture."
+ )
+
+ token inputs:wrapS (
+ allowedTokens = ["clamp", "repeat", "mirror", "black"]
+ doc = "Specifies the wrap mode for this texture."
+ )
+
+ token inputs:wrapT (
+ allowedTokens = ["clamp", "repeat", "mirror", "black"]
+ doc = "Specifies the wrap mode for this texture."
+ )
+
+ token inputs:minFilter (
+ allowedTokens = ["nearest", "linear",
+ "linearMipmapLinear", "linearMipmapNearest",
+ "nearestMipmapLinear", "nearestMipmapNearest" ]
+ doc = "Specifies the minification filter mode for this texture."
+ )
+
+ token inputs:magFilter (
+ allowedTokens = ["nearest", "linear"]
+ doc = "Specifies the magnification filter mode for this texture."
+ )
+
+ float4 inputs:fallback = (0.0, 0.0, 0.0, 1.0) (
+ doc = """Fallback value to be used when no texture is connected."""
+ sdrMetadata = {
+ token defaultInput = "1"
+ }
+ )
+}
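+
+# Illustrative sketch (not part of the shader definitions): a shader prim that
+# uses the deprecated HwUvTexture_1, with wrap and filter modes drawn from the
+# allowedTokens above. The texture path is a placeholder.
+#
+# def Shader "ExampleUvTexture"
+# {
+#     uniform token info:id = "HwUvTexture_1"
+#     asset inputs:file = @textures/diffuse.png@
+#     token inputs:wrapS = "repeat"
+#     token inputs:wrapT = "clamp"
+#     token inputs:minFilter = "linearMipmapLinear"
+#     token inputs:magFilter = "linear"
+# }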
+
+def Shader "HwFieldReader_1" (
+ sdrMetadata = {
+ token role = "field"
+ }
+ doc = """Samples from a usdVol FieldAsset."""
+)
+{
+ uniform asset info:glslfx:sourceAsset = @./empty.glslfx@
+ uniform token info:implementationSource = "sourceAsset"
+ token inputs:fieldname = "" (
+ connectability = "interfaceOnly"
+ doc = """Name of the field to be fetched from volume
+ (e.g., FOO if the relationship from the volume to field is called field::FOO)."""
+ sdrMetadata = {
+ token fieldProperty = "1"
+ }
+ )
+}
+
+def Shader "HwFieldReader_float" (
+    inherits = </HwFieldReader_1>
+)
+{
+ uniform token info:id = "HwFieldReader_float"
+ float inputs:fallback = 0 (
+ doc = "Fallback value to be returned when fetch failed."
+ sdrMetadata = {
+ token defaultInput = "1"
+ }
+ )
+ float outputs:result (
+ sdrMetadata = {
+ token swizzle = "x"
+ }
+ )
+}
+
+def Shader "HwFieldReader_float2" (
+    inherits = </HwFieldReader_1>
+)
+{
+ uniform token info:id = "HwFieldReader_float2"
+ float2 inputs:fallback = (0, 0) (
+ doc = "Fallback value to be returned when fetch failed."
+ sdrMetadata = {
+ token defaultInput = "1"
+ }
+ )
+ float2 outputs:result (
+ sdrMetadata = {
+ token swizzle = "xy"
+ }
+ )
+}
+
+def Shader "HwFieldReader_float3" (
+    inherits = </HwFieldReader_1>
+)
+{
+ uniform token info:id = "HwFieldReader_float3"
+ float3 inputs:fallback = (0, 0, 0) (
+ doc = "Fallback value to be returned when fetch failed."
+ sdrMetadata = {
+ token defaultInput = "1"
+ }
+ )
+ float3 outputs:result (
+ sdrMetadata = {
+ token swizzle = "xyz"
+ }
+ )
+}
+
+def Shader "HwPrimvar_1" (
+ sdrMetadata = {
+ token role = "primvar"
+ }
+)
+{
+ uniform token info:id = "HwPrimvar_1"
+ uniform token info:implementationSource = "sourceAsset"
+
+ # Add a dummy sourceAsset attribute with sourceType="glslfx", so that
+ # an entry gets created for this deprecated shader in the registry.
+ uniform asset info:glslfx:sourceAsset = @./empty.glslfx@
+
+ token inputs:varname = "" (
+ sdrMetadata = {
+ bool primvarProperty = 1
+ }
+ doc = """The name of the primvar. Note that on the gprim, this primvar
+ must follow the standard UsdGeom primvar declaration.
+ Further note that this name should not contain the UsdGeom primvar
+ namespace prefix.
+ """
+ )
+}
+#usda 1.0
+(
+ "This file describes the Pixar-specific USD Geometric schemata for code generation."
+ subLayers = [
+ @usd/schema.usda@
+ ]
+)
+
+over "GLOBAL" (
+ customData = {
+ string libraryName = "usdHydra"
+ string libraryPath = "pxr/usd/usdHydra"
+ dictionary libraryTokens = {
+
+ dictionary HwPrimvar_1 = {
+ string doc = """The id value of a Primvar shader."""
+ }
+
+ dictionary HwPtexTexture_1 = {
+ string doc = """The id value of a PtexTexture shader."""
+ }
+
+ dictionary HwUvTexture_1 = {
+ string doc = """The id value of a UvTexture shader."""
+ }
+
+ dictionary displayLookBxdf = {
+ string value = "displayLook:bxdf"
+ string doc = """\deprecated This has been deprecated in favor of
+ the glslfx:surface output.
+
+ Relationship on a material that targets the "bxdf" or the
+ surface shader prim."""
+ }
+
+ dictionary infoFilename = {
+ string value = "inputs:file"
+ string doc = """ The special "info:filename" property of a hydra
+ Texture shader, which points to a resolvable texture asset."""
+ }
+
+
+ dictionary infoVarname = {
+ string value = "inputs:varname"
+ string doc = """
+ """
+ }
+
+ dictionary textureMemory = {
+ string doc = """A shader input on a hydra Texture shader."""
+ }
+
+ dictionary frame = {
+ string doc = """A shader input on a "Texture" shader."""
+ }
+
+ dictionary uv = {
+ string doc = """A shader input on a hydra UvTexture shader."""
+ }
+
+ dictionary wrapS = {
+ string doc = """A shader input on a hydra UvTexture shader which
+ defines the behavior of texture coordinates that are outside the
+ bounds of the texture."""
+ }
+
+
+ dictionary wrapT = {
+ string doc = """A shader input on a hydra UvTexture shader which
+ defines the behavior of texture coordinates that are outside the
+ bounds of the texture."""
+ }
+
+ dictionary black = {
+ string doc = """Possible value for "wrapT" and "wrapS" inputs on
+ a "UvTexture" shader prim.
+ Causes black to be returned when sampling outside the bounds of
+ the texture."""
+ }
+
+ dictionary clamp = {
+ string doc = """Possible value for "wrapT" and "wrapS" inputs on
+ a "UvTexture" shader prim.
+                Causes the texture coordinate to be clamped to [0,1]."""
+ }
+
+ dictionary mirror = {
+ string doc = """Possible value for "wrapT" and "wrapS" inputs on
+ a "UvTexture" shader prim.
+ Causes the texture coordinate to wrap around like a mirror. -0.2
+                becomes 0.2, -1.2 becomes 0.8, etc."""
+ }
+
+ dictionary repeat = {
+ string doc = """Possible value for "wrapT" and "wrapS" inputs on
+ a "UvTexture" shader prim.
+ Causes the texture coordinate to wrap around the texture. So a
+ texture coordinate of -0.2 becomes the equivalent of 0.8."""
+ }
+
+ dictionary useMetadata = {
+ string doc = """Possible value for "wrapT" and "wrapS" inputs on
+ a "UvTexture" shader prim.
+ Causes the wrap value to be loaded from the texture file instead
+ of being specified in the prim. If the texture file doesn't
+ support metadata or the metadata doesn't contain a wrap mode,
+ the "black" wrap mode is used."""
+ }
+
+ dictionary magFilter = {
+ string doc = """An input on a UvTexture shader."""
+ }
+
+ dictionary minFilter = {
+ string doc = """An input on a UvTexture shader."""
+ }
+
+ dictionary linearMipmapLinear = {
+ string doc = """See https://www.opengl.org/wiki/Sampler_Object ,
+ Possible value for the "minFilter" input on a UvTexture shader.
+ """
+ }
+
+ dictionary linearMipmapNearest = {
+ string doc = """See https://www.opengl.org/wiki/Sampler_Object
+ Possible value for the "minFilter" input on a UvTexture shader.
+ """
+ }
+
+ dictionary nearestMipmapNearest = {
+ string doc = """See https://www.opengl.org/wiki/Sampler_Object
+ Possible value for the "minFilter" input on a UvTexture shader.
+ """
+ }
+
+ dictionary linear = {
+ string doc = """A weighted linear blend of nearest adjacent
+ samples.
+ Possible value for "minFilter" and "magFilter" inputs on a
+                UvTexture shader."""
+ }
+
+ dictionary nearest = {
+ string doc = """Selects the nearest sample for the given
+                coordinate.
+ Possible value for "minFilter" and "magFilter" inputs on a
+ UvTexture shader."""
+ }
+
+ dictionary nearestMipmapLinear = {
+ string doc = """See https://www.opengl.org/wiki/Sampler_Object
+ Possible value for "minFilter" and "magFilter" inputs on a
+ UvTexture shader."""
+ }
+
+ dictionary faceIndex = {
+ string doc = """The "faceIndex" shader input on a hydra
+ "PtexTexture" shader."""
+ }
+
+ dictionary faceOffset = {
+ string doc = """The "faceOffset" shader input on a hydra
+ "PtexTexture" shader."""
+ }
+ }
+ }
+){
+}
+
+class "HydraGenerativeProceduralAPI" (
+    inherits = </APISchemaBase>
+ doc = """
+ This API extends and configures the core UsdProcGenerativeProcedural schema
+ defined within usdProc for use with hydra generative procedurals as defined
+ within hdGp.
+ """
+ customData = {
+ string className = "GenerativeProceduralAPI"
+ token[] apiSchemaCanOnlyApplyTo = ["GenerativeProcedural"]
+ }
+){
+ token primvars:hdGp:proceduralType (
+ doc = """The registered name of a HdGpGenerativeProceduralPlugin to
+ be executed."""
+
+ customData = {
+ string apiName = "proceduralType"
+ }
+ )
+
+ token proceduralSystem = "hydraGenerativeProcedural" (
+ doc = """
+ This value should correspond to a configured instance of
+ HdGpGenerativeProceduralResolvingSceneIndex which will evaluate the
+ procedural. The default value of "hydraGenerativeProcedural" matches
+ the equivalent default of HdGpGenerativeProceduralResolvingSceneIndex.
+ Multiple instances of the scene index can be used to determine where
+ within a scene index chain a given procedural will be evaluated.
+ """
+ )
+}
+{
+ "Plugins": [
+ {
+ "Info": {
+ "SdfMetadata": {
+ "faceIndexPrimvar": {
+ "appliesTo": [
+ "attributes"
+ ],
+ "default": "ptexFaceIndex",
+ "documentation": "Specifies an array of face indices used for ptex mapping",
+ "type": "token"
+ },
+ "faceOffsetPrimvar": {
+ "appliesTo": [
+ "attributes"
+ ],
+ "default": "ptexFaceOffset",
+ "documentation": "Specifies the ptex face index offset for aggregated ptex files",
+ "type": "token"
+ },
+ "uvPrimvar": {
+ "appliesTo": [
+ "attributes"
+ ],
+ "default": "",
+ "documentation": "Specifies the UV primvar for texture mapping",
+ "type": "token"
+ }
+ },
+ "Types": {
+ "UsdImagingBasisCurvesAdapter": {
+ "bases": [
+ "UsdImagingGprimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "BasisCurves"
+ },
+ "UsdImagingCameraAdapter": {
+ "bases": [
+ "UsdImagingPrimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "Camera"
+ },
+ "UsdImagingCapsuleAdapter": {
+ "bases": [
+ "UsdImagingGprimAdapter"
+ ],
+ "isInternal": true,
+ "includeSchemaFamily": true,
+ "primTypeName": "Capsule"
+ },
+ "UsdImagingCollectionAPIAdapter" : {
+ "bases": ["UsdImagingAPISchemaAdapter"],
+ "isInternal": true,
+ "apiSchemaName": "CollectionAPI"
+ },
+ "UsdImagingConeAdapter": {
+ "bases": [
+ "UsdImagingGprimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "Cone"
+ },
+ "UsdImagingCoordSysAdapter": {
+ "bases": [
+ "UsdImagingPrimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "coordSys"
+ },
+ "UsdImagingCoordSysAPIAdapter" : {
+ "bases": ["UsdImagingAPISchemaAdapter"],
+ "isInternal": true,
+ "apiSchemaName": "CoordSysAPI"
+ },
+ "UsdImagingCubeAdapter": {
+ "bases": [
+ "UsdImagingGprimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "Cube"
+ },
+ "UsdImagingCylinderAdapter": {
+ "bases": [
+ "UsdImagingGprimAdapter"
+ ],
+ "isInternal": true,
+ "includeSchemaFamily": true,
+ "primTypeName": "Cylinder"
+ },
+ "UsdImagingDrawModeAdapter" : {
+ "bases": ["UsdImagingInstanceablePrimAdapter"],
+ "isInternal": true,
+ "primTypeName": "__drawModeAdapter"
+ },
+ "UsdImagingGeomSubsetAdapter" : {
+ "bases": ["UsdImagingPrimAdapter"],
+ "isInternal": true,
+ "primTypeName": "GeomSubset"
+ },
+ "UsdImagingHermiteCurvesAdapter": {
+ "bases": [
+ "UsdImagingGprimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "HermiteCurves"
+ },
+ "UsdImagingMaterialAdapter" : {
+ "bases": ["UsdImagingPrimAdapter"],
+ "isInternal": true,
+ "primTypeName": "Material"
+ },
+
+ "UsdImagingShaderAdapter" : {
+ "bases": ["UsdImagingRepresentedByAncestorPrimAdapter"],
+ "isInternal": true,
+ "primTypeName": "Shader"
+ },
+
+ "UsdImagingNodeGraphAdapter" : {
+ "bases": ["UsdImagingRepresentedByAncestorPrimAdapter"],
+ "isInternal": true,
+ "primTypeName": "NodeGraph"
+ },
+
+ "UsdImagingMaterialBindingAPIAdapter" : {
+ "bases": ["UsdImagingAPISchemaAdapter"],
+ "isInternal": true,
+ "apiSchemaName": "MaterialBindingAPI"
+ },
+ "UsdImagingMeshAdapter": {
+ "bases": [
+ "UsdImagingGprimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "Mesh"
+ },
+ "UsdImagingTetMeshAdapter": {
+ "bases": [
+ "UsdImagingGprimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "TetMesh"
+ },
+ "UsdImagingNurbsCurvesAdapter": {
+ "bases": [
+ "UsdImagingGprimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "NurbsCurves"
+ },
+ "UsdImagingNurbsPatchAdapter": {
+ "bases": [
+ "UsdImagingGprimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "NurbsPatch"
+ },
+ "UsdImagingPlaneAdapter": {
+ "bases": [
+ "UsdImagingGprimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "Plane"
+ },
+ "UsdImagingPointsAdapter": {
+ "bases": [
+ "UsdImagingGprimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "Points"
+ },
+ "UsdImagingPointInstancerAdapter": {
+ "bases": [
+ "UsdImagingInstanceablePrimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "PointInstancer"
+ },
+ "UsdImagingSphereAdapter": {
+ "bases": [
+ "UsdImagingGprimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "Sphere"
+ },
+ "UsdImagingRenderSettingsAdapter": {
+ "bases": [
+ "UsdImagingPrimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "RenderSettings"
+ },
+ "UsdImagingRenderProductAdapter": {
+ "bases": [
+ "UsdImagingPrimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "RenderProduct"
+ },
+ "UsdImagingRenderVarAdapter": {
+ "bases": [
+ "UsdImagingPrimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "RenderVar"
+ },
+ "UsdImagingVolumeAdapter": {
+ "bases": [
+ "UsdImagingGprimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "Volume"
+ },
+ "UsdImagingLightAdapter": {
+ "bases": [
+ "UsdImagingInstanceablePrimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "LightAPI",
+ "includeDerivedPrimTypes" : true
+ },
+ "UsdImagingLightAPIAdapter": {
+ "bases": [
+ "UsdImagingAPISchemaAdapter"
+ ],
+ "isInternal": true,
+ "apiSchemaName": "LightAPI"
+ },
+ "UsdImagingLightFilterAdapter": {
+ "bases": [
+ "UsdImagingPrimAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "LightFilter",
+ "includeDerivedPrimTypes" : true
+ },
+ "UsdImagingDomeLightAdapter": {
+ "bases": [
+ "UsdImagingLightAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "DomeLight"
+ },
+ "UsdImagingDomeLight_1Adapter": {
+ "bases": [
+ "UsdImagingLightAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "DomeLight_1"
+ },
+ "UsdImagingRectLightAdapter": {
+ "bases": [
+ "UsdImagingLightAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "RectLight"
+ },
+ "UsdImagingSphereLightAdapter": {
+ "bases": [
+ "UsdImagingLightAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "SphereLight"
+ },
+ "UsdImagingCylinderLightAdapter": {
+ "bases": [
+ "UsdImagingLightAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "CylinderLight"
+ },
+ "UsdImagingDiskLightAdapter": {
+ "bases": [
+ "UsdImagingLightAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "DiskLight"
+ },
+ "UsdImagingDistantLightAdapter": {
+ "bases": [
+ "UsdImagingLightAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "DistantLight"
+ },
+ "UsdImagingPluginLightAdapter": {
+ "bases": [
+ "UsdImagingLightAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "PluginLight"
+ },
+ "UsdImagingGeometryLightAdapter": {
+ "bases": [
+ "UsdImagingLightAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "GeometryLight"
+ },
+ "UsdImagingPortalLightAdapter": {
+ "bases": [
+ "UsdImagingLightAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "PortalLight"
+ },
+ "UsdImagingPluginLightFilterAdapter": {
+ "bases": [
+ "UsdImagingLightFilterAdapter"
+ ],
+ "isInternal": true,
+ "primTypeName": "PluginLightFilter"
+ },
+ "UsdImagingGeomModelAPIAdapter": {
+ "bases": [
+ "UsdImagingAPISchemaAdapter"
+ ],
+ "isInternal": true,
+ "apiSchemaName": "GeomModelAPI"
+ }
+ }
+ },
+ "LibraryPath": "",
+ "Name": "usdImaging",
+ "ResourcePath": "resources",
+ "Root": "..",
+ "Type": "library"
+ }
+ ]
+}
+#usda 1.0
+(
+ "WARNING: THIS FILE IS GENERATED BY usdGenSchema. DO NOT EDIT."
+)
+
+class "LightAPI" (
+ apiSchemas = ["CollectionAPI:lightLink", "CollectionAPI:shadowLink"]
+ customData = {
+ token[] apiSchemaOverridePropertyNames = ["collection:lightLink:includeRoot", "collection:shadowLink:includeRoot"]
+ }
+ doc = """API schema that imparts the quality of being a light onto a prim.
+
+ A light is any prim that has this schema applied to it. This is true
+ regardless of whether LightAPI is included as a built-in API of the prim
+ type (e.g. RectLight or DistantLight) or is applied directly to a Gprim
+ that should be treated as a light.
+
+ Linking
+
+ Lights can be linked to geometry. Linking controls which geometry
+ a light illuminates, and which geometry casts shadows from the light.
+
+ Linking is specified as collections (UsdCollectionAPI) which can
+ be accessed via GetLightLinkCollection() and GetShadowLinkCollection().
+ Note that these collections have their includeRoot set to true,
+ so that lights will illuminate and cast shadows from all objects
+ by default. To illuminate only a specific set of objects, there
+ are two options. One option is to modify the collection paths
+ to explicitly exclude everything else, assuming it is known;
+ the other option is to set includeRoot to false and explicitly
+ include the desired objects. These are complementary approaches
+ that may each be preferable depending on the scenario and how
+ to best express the intent of the light setup.
+ """
+)
+{
+ uniform bool collection:lightLink:includeRoot = 1
+ uniform bool collection:shadowLink:includeRoot = 1
+ color3f inputs:color = (1, 1, 1) (
+ displayGroup = "Basic"
+ displayName = "Color"
+ doc = "The color of emitted light, in energy-linear terms."
+ )
+ float inputs:colorTemperature = 6500 (
+ displayGroup = "Basic"
+ displayName = "Color Temperature"
+ doc = """Color temperature, in degrees Kelvin, representing the
+ white point. The default is a common white point, D65. Lower
+ values are warmer and higher values are cooler. The valid range
+ is from 1000 to 10000. Only takes effect when
+ enableColorTemperature is set to true. When active, the
+ computed result multiplies against the color attribute.
+ See UsdLuxBlackbodyTemperatureAsRgb()."""
+ )
+ float inputs:diffuse = 1 (
+ displayGroup = "Refine"
+ displayName = "Diffuse Multiplier"
+ doc = """A multiplier for the effect of this light on the diffuse
+ response of materials. This is a non-physical control."""
+ )
+ bool inputs:enableColorTemperature = 0 (
+ displayGroup = "Basic"
+ displayName = "Enable Color Temperature"
+ doc = "Enables using colorTemperature."
+ )
+ float inputs:exposure = 0 (
+ displayGroup = "Basic"
+ displayName = "Exposure"
+ doc = """Scales the power of the light exponentially as a power
+ of 2 (similar to an F-stop control over exposure). The result
+ is multiplied against the intensity."""
+ )
+ float inputs:intensity = 1 (
+ displayGroup = "Basic"
+ displayName = "Intensity"
+ doc = "Scales the power of the light linearly."
+ )
+ bool inputs:normalize = 0 (
+ displayGroup = "Advanced"
+ displayName = "Normalize Power"
+ doc = """Normalizes power by the surface area of the light.
+ This makes it easier to independently adjust the power and shape
+ of the light, by causing the power to not vary with the area or
+ angular size of the light."""
+ )
+ float inputs:specular = 1 (
+ displayGroup = "Refine"
+ displayName = "Specular Multiplier"
+ doc = """A multiplier for the effect of this light on the specular
+ response of materials. This is a non-physical control."""
+ )
+ rel light:filters (
+ doc = "Relationship to the light filters that apply to this light."
+ )
+ uniform token light:materialSyncMode = "noMaterialResponse" (
+ allowedTokens = ["materialGlowTintsLight", "independent", "noMaterialResponse"]
+ displayGroup = "Geometry"
+ displayName = "Material Sync Mode"
+ doc = """For a LightAPI applied to geometry that has a bound Material,
+ which is entirely or partly emissive, this specifies the relationship
+ of the Material response to the lighting response.
+ Valid values are:
+ - materialGlowTintsLight: All primary and secondary rays see the
+ emissive/glow response as dictated by the bound Material while the
+ base color seen by light rays (which is then modulated by all of the
+ other LightAPI controls) is the multiplication of the color feeding
+ the emission/glow input of the Material (i.e. its surface or volume
+ shader) with the scalar or pattern input to *inputs:color*.
+ This allows the light's color to tint the geometry's glow color while
+ preserving access to intensity and other light controls as ways to
+ further modulate the illumination.
+ - independent: All primary and secondary rays see the emissive/glow
+ response as dictated by the bound Material, while the base color seen
+ by light rays is determined solely by *inputs:color*. Note that for
+ partially emissive geometry (in which some parts are reflective
+ rather than emissive), a suitable pattern must be connected to the
+ light's color input, or else the light will radiate uniformly from
+ the geometry.
+ - noMaterialResponse: The geometry behaves as if there is no Material
+ bound at all, i.e. there is no diffuse, specular, or transmissive
+ response. The base color of light rays is entirely controlled by the
+ *inputs:color*. This is the standard mode for \"canonical\" lights in
+ UsdLux and indicates to renderers that a Material will either never
+ be bound or can always be ignored.
+ """
+ )
+ uniform token light:shaderId = "" (
+ displayGroup = "Internal"
+ doc = """Default ID for the light's shader.
+ This defines the shader ID for this light when a render context specific
+ shader ID is not available.
+
+ The default shaderId for the intrinsic UsdLux lights (RectLight,
+ DistantLight, etc.) are set to default to the light's type name. For
+ each intrinsic UsdLux light, we will always register an SdrShaderNode in
+ the SdrRegistry, with the identifier matching the type name and the
+ source type \"USD\", that corresponds to the light's inputs.
+ \\see GetShaderId
+ \\see GetShaderIdAttrForRenderContext
+ \\see SdrRegistry::GetShaderNodeByIdentifier
+ \\see SdrRegistry::GetShaderNodeByIdentifierAndType
+ """
+ )
+}
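+
+# Illustrative sketch (not part of the generated schema): restricting a light's
+# illumination via its lightLink collection, per the Linking discussion above.
+# This takes the second option: set includeRoot to false and explicitly include
+# the desired objects. The target path is a placeholder.
+#
+# def SphereLight "ExampleLight"
+# {
+#     uniform bool collection:lightLink:includeRoot = 0
+#     rel collection:lightLink:includes = [</World/Hero>]
+# }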
+
+class "MeshLightAPI" (
+ apiSchemas = ["LightAPI"]
+ customData = {
+ token[] apiSchemaOverridePropertyNames = ["light:materialSyncMode", "light:shaderId"]
+ }
+ doc = '''This is the preferred API schema to apply to
+ "Mesh" type prims when adding light behaviors to a mesh.
+ At its base, this API schema has the built-in behavior of applying LightAPI
+ to the mesh and overriding the default materialSyncMode to allow the
+ emission/glow of the bound material to affect the color of the light.
+ But, it additionally serves as a hook for plugins to attach additional
+ properties to "mesh lights" through the creation of API schemas which are
+ authored to auto-apply to MeshLightAPI.
+ \\see \\ref Usd_AutoAppliedAPISchemas
+ '''
+)
+{
+ uniform token light:materialSyncMode = "materialGlowTintsLight"
+ uniform token light:shaderId = "MeshLight"
+}
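+
+# Illustrative sketch (not part of the generated schema): turning a mesh into a
+# light by applying MeshLightAPI. The built-in override of light:materialSyncMode
+# to "materialGlowTintsLight" lets the bound material's emission tint the light.
+# The material path is a placeholder.
+#
+# def Mesh "GlowPanel" (
+#     prepend apiSchemas = ["MeshLightAPI", "MaterialBindingAPI"]
+# )
+# {
+#     rel material:binding = </Materials/GlowMaterial>
+#     float inputs:intensity = 5
+# }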
+
+class "VolumeLightAPI" (
+ apiSchemas = ["LightAPI"]
+ customData = {
+ token[] apiSchemaOverridePropertyNames = ["light:materialSyncMode", "light:shaderId"]
+ }
+ doc = '''This is the preferred API schema to apply to
+ "Volume" type prims when adding light behaviors to a
+ volume. At its base, this API schema has the built-in behavior of applying
+ LightAPI to the volume and overriding the default materialSyncMode to allow
+ the emission/glow of the bound material to affect the color of the light.
+ But, it additionally serves as a hook for plugins to attach additional
+ properties to "volume lights" through the creation of API schemas which are
+ authored to auto-apply to VolumeLightAPI.
+ \\see \\ref Usd_AutoAppliedAPISchemas
+ '''
+)
+{
+ uniform token light:materialSyncMode = "materialGlowTintsLight"
+ uniform token light:shaderId = "VolumeLight"
+}
+
+class "LightListAPI" (
+ doc = '''API schema to support discovery and publishing of lights in a scene.
+
+ Discovering Lights via Traversal
+
+ To motivate this API, consider what is required to discover all
+ lights in a scene. We must load all payloads and traverse all prims:
+
+ \\code
+ 01 // Load everything on the stage so we can find all lights,
+ 02 // including those inside payloads
+ 03 stage->Load();
+ 04
+ 05 // Traverse all prims, checking if they have an applied UsdLuxLightAPI
+ 06 // (Note: ignoring instancing and a few other things for simplicity)
+ 07 SdfPathVector lights;
+ 08 for (UsdPrim prim: stage->Traverse()) {
+    09 if (prim.HasAPI<UsdLuxLightAPI>()) {
+    10 lights.push_back(prim.GetPath());
+ 11 }
+ 12 }
+ \\endcode
+
+ This traversal -- suitably elaborated to handle certain details --
+ is the first and simplest thing UsdLuxLightListAPI provides.
+ UsdLuxLightListAPI::ComputeLightList() performs this traversal and returns
+ all lights in the scene:
+
+ \\code
+ 01 UsdLuxLightListAPI listAPI(stage->GetPseudoRoot());
+    02 SdfPathSet lights = listAPI.ComputeLightList(
+    03 UsdLuxLightListAPI::ComputeModeIgnoreCache);
+ \\endcode
+
+ Publishing a Cached Light List
+
+ Consider a USD client that needs to quickly discover lights but
+ wants to defer loading payloads and traversing the entire scene
+ where possible, and is willing to do up-front computation and
+ caching to achieve that.
+
+ UsdLuxLightListAPI provides a way to cache the computed light list,
+ by publishing the list of lights onto prims in the model
+ hierarchy. Consider a big set that contains lights:
+
+ \\code
+ 01 def Xform "BigSetWithLights" (
+ 02 kind = "assembly"
+ 03 payload = @BigSetWithLights.usd@ // Heavy payload
+ 04 ) {
+ 05 // Pre-computed, cached list of lights inside payload
+ 06 rel lightList = [
+ 07 <./Lights/light_1>,
+ 08 <./Lights/light_2>,
+ 09 ...
+ 10 ]
+ 11 token lightList:cacheBehavior = "consumeAndContinue";
+ 12 }
+ \\endcode
+
+ The lightList relationship encodes a set of lights, and the
+ lightList:cacheBehavior property provides fine-grained
+ control over how to use that cache. (See details below.)
+
+ The cache can be created by first invoking
+ ComputeLightList(ComputeModeIgnoreCache) to pre-compute the list
+ and then storing the result with UsdLuxLightListAPI::StoreLightList().
+
+ To enable efficient retrieval of the cache, it should be stored
+ on a model hierarchy prim. Furthermore, note that while you can
+ use a UsdLuxLightListAPI bound to the pseudo-root prim to query the
+ lights (as in the example above) because it will perform a
+ traversal over descendants, you cannot store the cache back to the
+    pseudo-root prim.
+
+ To consult the cached list, we invoke
+ ComputeLightList(ComputeModeConsultModelHierarchyCache):
+
+ \\code
+ 01 // Find and load all lights, using lightList cache where available
+ 02 UsdLuxLightListAPI list(stage->GetPseudoRoot());
+ 03 SdfPathSet lights = list.ComputeLightList(
+ 04 UsdLuxLightListAPI::ComputeModeConsultModelHierarchyCache);
+ 05 stage.LoadAndUnload(lights, SdfPathSet());
+ \\endcode
+
+ In this mode, ComputeLightList() will traverse the model
+ hierarchy, accumulating cached light lists.
+
+ Controlling Cache Behavior
+
+ The lightList:cacheBehavior property gives additional fine-grained
+ control over cache behavior:
+
+ - The fallback value, "ignore", indicates that the lightList should
+ be disregarded. This provides a way to invalidate cache entries.
+ Note that unless "ignore" is specified, a lightList with an empty
+ list of targets is considered a cache indicating that no lights
+ are present.
+
+ - The value "consumeAndContinue" indicates that the cache should
+ be consulted to contribute lights to the scene, and that recursion
+ should continue down the model hierarchy in case additional lights
+    are added as descendants. This is the default value established when
+ StoreLightList() is invoked. This behavior allows the lights within
+ a large model, such as the BigSetWithLights example above, to be
+ published outside the payload, while also allowing referencing and
+ layering to add additional lights over that set.
+
+ - The value "consumeAndHalt" provides a way to terminate recursive
+ traversal of the scene for light discovery. The cache will be
+ consulted but no descendant prims will be examined.
+
+ Instancing
+
+ Where instances are present, UsdLuxLightListAPI::ComputeLightList() will
+ return the instance-unique paths to any lights discovered within
+ those instances. Lights within a UsdGeomPointInstancer will
+ not be returned, however, since they cannot be referred to
+ solely via paths.
+'''
+)
+{
+ rel lightList (
+ doc = "Relationship to lights in the scene."
+ )
+ token lightList:cacheBehavior (
+ allowedTokens = ["consumeAndHalt", "consumeAndContinue", "ignore"]
+ doc = """Controls how the lightList should be interpreted.
+ Valid values are:
+ - consumeAndHalt: The lightList should be consulted,
+ and if it exists, treated as a final authoritative statement
+ of any lights that exist at or below this prim, halting
+ recursive discovery of lights.
+ - consumeAndContinue: The lightList should be consulted,
+ but recursive traversal over nameChildren should continue
+ in case additional lights are added by descendants.
+ - ignore: The lightList should be entirely ignored. This
+ provides a simple way to temporarily invalidate an existing
+ cache. This is the fallback behavior.
+ """
+ )
+}
+
+class "ListAPI" (
+ doc = """
+ \\deprecated
+ Use LightListAPI instead
+"""
+)
+{
+ rel lightList (
+ doc = "Relationship to lights in the scene."
+ )
+ token lightList:cacheBehavior (
+ allowedTokens = ["consumeAndHalt", "consumeAndContinue", "ignore"]
+ doc = """Controls how the lightList should be interpreted.
+ Valid values are:
+ - consumeAndHalt: The lightList should be consulted,
+ and if it exists, treated as a final authoritative statement
+ of any lights that exist at or below this prim, halting
+ recursive discovery of lights.
+ - consumeAndContinue: The lightList should be consulted,
+ but recursive traversal over nameChildren should continue
+ in case additional lights are added by descendants.
+ - ignore: The lightList should be entirely ignored. This
+ provides a simple way to temporarily invalidate an existing
+ cache. This is the fallback behavior.
+ """
+ )
+}
+
+class "ShapingAPI" (
+ doc = "Controls for shaping a light's emission."
+)
+{
+ float inputs:shaping:cone:angle = 90 (
+ displayGroup = "Shaping"
+ displayName = "Cone Angle"
+ doc = """Angular limit off the primary axis to restrict the
+ light spread."""
+ )
+ float inputs:shaping:cone:softness = 0 (
+ displayGroup = "Shaping"
+ displayName = "Cone Softness"
+ doc = """Controls the cutoff softness for cone angle.
+ TODO: clarify semantics"""
+ )
+ float inputs:shaping:focus = 0 (
+ displayGroup = "Shaping"
+ displayName = "Emission Focus"
+ doc = """A control to shape the spread of light. Higher focus
+ values pull light towards the center and narrow the spread.
+ Implemented as an off-axis cosine power exponent.
+ TODO: clarify semantics"""
+ )
+ color3f inputs:shaping:focusTint = (0, 0, 0) (
+ displayGroup = "Shaping"
+ displayName = "Emission Focus Tint"
+ doc = """Off-axis color tint. This tints the emission in the
+ falloff region. The default tint is black.
+ TODO: clarify semantics"""
+ )
+ float inputs:shaping:ies:angleScale = 0 (
+ displayGroup = "Shaping"
+ displayName = "Profile Scale"
+ doc = """Rescales the angular distribution of the IES profile.
+ TODO: clarify semantics"""
+ )
+ asset inputs:shaping:ies:file (
+ displayGroup = "Shaping"
+ displayName = "IES Profile"
+ doc = """An IES (Illumination Engineering Society) light
+ profile describing the angular distribution of light."""
+ )
+ bool inputs:shaping:ies:normalize = 0 (
+ displayGroup = "Shaping"
+ displayName = "Profile Normalization"
+ doc = """Normalizes the IES profile so that it affects the shaping
+ of the light while preserving the overall energy output."""
+ )
+}
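+
+# Illustrative sketch (not part of the generated schema): applying ShapingAPI to
+# a light to restrict its emission to a soft-edged cone. Values are placeholders.
+#
+# def SphereLight "ExampleSpot" (
+#     prepend apiSchemas = ["ShapingAPI"]
+# )
+# {
+#     float inputs:shaping:cone:angle = 35
+#     float inputs:shaping:cone:softness = 0.25
+#     float inputs:shaping:focus = 2
+# }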
+
+class "ShadowAPI" (
+ doc = """Controls to refine a light's shadow behavior. These are
+ non-physical controls that are valuable for visual lighting work."""
+)
+{
+ color3f inputs:shadow:color = (0, 0, 0) (
+ displayGroup = "Shadows"
+ displayName = "Shadow Color"
+ doc = """The color of shadows cast by the light. This is a
+ non-physical control. The default is to cast black shadows."""
+ )
+ float inputs:shadow:distance = -1 (
+ displayGroup = "Shadows"
+ displayName = "Shadow Max Distance"
+ doc = """The maximum distance shadows are cast. The distance is
+ measured as the distance between the point on the surface and the
+ occluder.
+ The default value (-1) indicates no limit.
+ """
+ )
+ bool inputs:shadow:enable = 1 (
+ displayGroup = "Shadows"
+ displayName = "Enable Shadows"
+ doc = "Enables shadows to be cast by this light."
+ )
+ float inputs:shadow:falloff = -1 (
+ displayGroup = "Shadows"
+ displayName = "Shadow Falloff"
+ doc = """The size of the shadow falloff zone within the shadow max
+ distance, which can be used to hide the hard cut-off for shadows seen
+ stretching past the max distance. The falloff zone is the area that
+ fades from full shadowing at the beginning of the falloff zone to no
+ shadowing at the max distance from the occluder. The falloff zone
+ distance cannot exceed the shadow max distance. A falloff value equal
+ to or less than zero (with -1 as the default) indicates no falloff.
+ """
+ )
+ float inputs:shadow:falloffGamma = 1 (
+ displayGroup = "Shadows"
+ displayName = "Shadow Falloff Gamma"
+ doc = """A gamma (i.e., exponential) control over shadow strength
+ with linear distance within the falloff zone. This controls the rate
+ of the falloff.
+ This requires the use of shadowDistance and shadowFalloff."""
+ )
+}
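+
+# Illustrative sketch (not part of the generated schema): applying ShadowAPI to
+# tint shadows and limit how far they are cast. Per the doc above, the falloff
+# zone distance must not exceed the shadow max distance. Values are placeholders.
+#
+# def RectLight "ExampleKeyLight" (
+#     prepend apiSchemas = ["ShadowAPI"]
+# )
+# {
+#     color3f inputs:shadow:color = (0.05, 0.05, 0.1)
+#     float inputs:shadow:distance = 100
+#     float inputs:shadow:falloff = 20
+#     float inputs:shadow:falloffGamma = 2
+# }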
+
+class LightFilter "LightFilter" (
+ apiSchemas = ["CollectionAPI:filterLink"]
+ customData = {
+ token[] apiSchemaOverridePropertyNames = ["collection:filterLink:includeRoot"]
+ }
+ doc = """A light filter modifies the effect of a light.
+ Lights refer to filters via relationships so that filters may be
+ shared.
+
+ Linking
+
+ Filters can be linked to geometry. Linking controls which geometry
+ a light-filter affects, when considering the light filters attached
+ to a light illuminating the geometry.
+
+ Linking is specified as a collection (UsdCollectionAPI) which can
+ be accessed via GetFilterLinkCollection().
+ """
+)
+{
+ uniform bool collection:filterLink:includeRoot = 1
+ uniform token lightFilter:shaderId = "" (
+ displayGroup = "Internal"
+ doc = """Default ID for the light filter's shader.
+ This defines the shader ID for this light filter when a render context
+ specific shader ID is not available.
+
+ \\see GetShaderId
+ \\see GetShaderIdAttrForRenderContext
+ \\see SdrRegistry::GetShaderNodeByIdentifier
+ \\see SdrRegistry::GetShaderNodeByIdentifierAndType
+ """
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See \\ref UsdGeom_ImageablePurpose for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
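+
+# Illustrative sketch (not part of the generated schema): sharing a light filter
+# between lights via the light:filters relationship, per the doc above. The
+# filter's filterLink collection (includeRoot = 1 by default) controls which
+# geometry it affects. Paths are placeholders.
+#
+# def LightFilter "ExampleFilter"
+# {
+#     uniform bool collection:filterLink:includeRoot = 1
+# }
+#
+# def DiskLight "ExampleLight"
+# {
+#     rel light:filters = [</ExampleFilter>]
+# }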
+
+class "BoundableLightBase" (
+ apiSchemas = ["LightAPI"]
+ doc = """Base class for intrinsic lights that are boundable.
+
+ The primary purpose of this class is to provide a direct API to the
+ functions provided by LightAPI for concrete derived light types.
+ """
+)
+{
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See \\ref UsdGeom_ImageablePurpose for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class "NonboundableLightBase" (
+ apiSchemas = ["LightAPI"]
+ doc = """Base class for intrinsic lights that are not boundable.
+
+ The primary purpose of this class is to provide a direct API to the
+ functions provided by LightAPI for concrete derived light types.
+ """
+)
+{
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See \\ref UsdGeom_ImageablePurpose for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class DistantLight "DistantLight" (
+ apiSchemas = ["LightAPI"]
+ customData = {
+ token[] apiSchemaOverridePropertyNames = ["inputs:intensity", "light:shaderId"]
+ }
+ doc = """Light emitted from a distant source along the -Z axis.
+ Also known as a directional light."""
+)
+{
+ float inputs:angle = 0.53 (
+ displayGroup = "Basic"
+ displayName = "Angle Extent"
+ doc = """Angular size of the light in degrees.
+ As an example, the Sun is approximately 0.53 degrees as seen from Earth.
+ Higher values broaden the light and therefore soften shadow edges.
+ """
+ )
+ float inputs:intensity = 50000 (
+ doc = """Scales the emission of the light linearly.
+ The DistantLight has a high default intensity to approximate the Sun."""
+ )
+ uniform token light:shaderId = "DistantLight"
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See \\ref UsdGeom_ImageablePurpose for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class DiskLight "DiskLight" (
+ apiSchemas = ["LightAPI"]
+ customData = {
+ token[] apiSchemaOverridePropertyNames = ["light:shaderId"]
+ }
+ doc = """Light emitted from one side of a circular disk.
+ The disk is centered in the XY plane and emits light along the -Z axis."""
+)
+{
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ float inputs:radius = 0.5 (
+ displayGroup = "Geometry"
+ displayName = "Radius"
+ doc = "Radius of the disk."
+ )
+ uniform token light:shaderId = "DiskLight"
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See \\ref UsdGeom_ImageablePurpose for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class RectLight "RectLight" (
+ apiSchemas = ["LightAPI"]
+ customData = {
+ token[] apiSchemaOverridePropertyNames = ["light:shaderId"]
+ }
+ doc = """Light emitted from one side of a rectangle.
+ The rectangle is centered in the XY plane and emits light along the -Z axis.
+ The rectangle is 1 unit in length in the X and Y axis. In the default
+ position, a texture file's min coordinates should be at (+X, +Y) and
+ max coordinates at (-X, -Y)."""
+)
+{
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ float inputs:height = 1 (
+ displayGroup = "Geometry"
+ displayName = "Height"
+ doc = "Height of the rectangle, in the local Y axis."
+ )
+ asset inputs:texture:file (
+ displayGroup = "Basic"
+ displayName = "Color Map"
+ doc = "A color texture to use on the rectangle."
+ )
+ float inputs:width = 1 (
+ displayGroup = "Geometry"
+ displayName = "Width"
+ doc = "Width of the rectangle, in the local X axis."
+ )
+ uniform token light:shaderId = "RectLight"
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See \\ref UsdGeom_ImageablePurpose for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
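As a rough orientation for readers of this generated schema, the RectLight attributes declared above are usually authored through the USD Python API rather than by hand. A minimal sketch, assuming a pxr Python build; the stage, prim path, texture path, and values are illustrative only:

```
from pxr import Usd, UsdLux, Sdf

stage = Usd.Stage.CreateInMemory()

# Define a rect light and author the attributes declared in the schema above.
rect = UsdLux.RectLight.Define(stage, "/World/Lights/KeyLight")
rect.CreateWidthAttr(2.0)                                   # inputs:width
rect.CreateHeightAttr(1.0)                                  # inputs:height
rect.CreateTextureFileAttr(Sdf.AssetPath("tex/key.exr"))    # inputs:texture:file

# Intensity/exposure/color come from the built-in LightAPI applied to RectLight.
UsdLux.LightAPI(rect.GetPrim()).CreateIntensityAttr(5000.0)

print(stage.GetRootLayer().ExportToString())
```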
+class SphereLight "SphereLight" (
+ apiSchemas = ["LightAPI"]
+ customData = {
+ token[] apiSchemaOverridePropertyNames = ["light:shaderId"]
+ }
+ doc = "Light emitted outward from a sphere."
+)
+{
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ float inputs:radius = 0.5 (
+ displayGroup = "Geometry"
+ displayName = "Radius"
+ doc = "Radius of the sphere."
+ )
+ uniform token light:shaderId = "SphereLight"
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ bool treatAsPoint = 0 (
+ displayGroup = "Advanced"
+ displayName = "Treat As Point"
+ doc = """A hint that this light can be treated as a 'point'
+ light (effectively, a zero-radius sphere) by renderers that
+ benefit from non-area lighting. Renderers that only support
+ area lights can disregard this."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
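The SphereLight's treatAsPoint hint (and the analogous treatAsLine on CylinderLight below) is typically authored alongside the radius. A small sketch under the same assumptions as above, with illustrative paths and values:

```
from pxr import Usd, UsdLux

stage = Usd.Stage.CreateInMemory()

# A small fill light that renderers may evaluate as a zero-radius point light.
fill = UsdLux.SphereLight.Define(stage, "/World/Lights/Fill")
fill.CreateRadiusAttr(0.05)           # inputs:radius
fill.CreateTreatAsPointAttr(True)     # treatAsPoint hint
UsdLux.LightAPI(fill.GetPrim()).CreateExposureAttr(-1.0)
```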
+class CylinderLight "CylinderLight" (
+ apiSchemas = ["LightAPI"]
+ customData = {
+ token[] apiSchemaOverridePropertyNames = ["light:shaderId"]
+ }
+ doc = """Light emitted outward from a cylinder.
+ The cylinder is centered at the origin and has its major axis on the X axis.
+ The cylinder does not emit light from the flat end-caps.
+ """
+)
+{
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ float inputs:length = 1 (
+ displayGroup = "Geometry"
+ displayName = "Length"
+ doc = "Length of the cylinder, in the local X axis."
+ )
+ float inputs:radius = 0.5 (
+ displayGroup = "Geometry"
+ displayName = "Radius"
+ doc = "Radius of the cylinder."
+ )
+ uniform token light:shaderId = "CylinderLight"
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ bool treatAsLine = 0 (
+ displayGroup = "Advanced"
+ displayName = "Treat As Line"
+ doc = """A hint that this light can be treated as a 'line'
+ light (effectively, a zero-radius cylinder) by renderers that
+ benefit from non-area lighting. Renderers that only support
+ area lights can disregard this."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class GeometryLight "GeometryLight" (
+ apiSchemas = ["LightAPI"]
+ customData = {
+ token[] apiSchemaOverridePropertyNames = ["light:shaderId"]
+ }
+ doc = """\\deprecated
+ Light emitted outward from a geometric prim (UsdGeomGprim),
+ which is typically a mesh."""
+)
+{
+ rel geometry (
+ doc = "Relationship to the geometry to use as the light source."
+ )
+ uniform token light:shaderId = "GeometryLight"
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class DomeLight "DomeLight" (
+ apiSchemas = ["LightAPI"]
+ customData = {
+ token[] apiSchemaOverridePropertyNames = ["light:shaderId"]
+ }
+ doc = """Light emitted inward from a distant external environment,
+ such as a sky or IBL light probe.
+
+ In this version of the dome light, the dome's default orientation is such
+ that its top pole is aligned with the world's +Y axis. This adheres to the
+ OpenEXR specification for latlong environment maps. From the OpenEXR
+ documentation:
+
+ -------------------------------------------------------------------------
+ Latitude-Longitude Map:
+
+ The environment is projected onto the image using polar coordinates
+ (latitude and longitude). A pixel's x coordinate corresponds to
+ its longitude, and the y coordinate corresponds to its latitude.
+ Pixel (dataWindow.min.x, dataWindow.min.y) has latitude +pi/2 and
+ longitude +pi; pixel (dataWindow.max.x, dataWindow.max.y) has
+ latitude -pi/2 and longitude -pi.
+
+ In 3D space, latitudes -pi/2 and +pi/2 correspond to the negative and
+ positive y direction. Latitude 0, longitude 0 points into positive
+ z direction; and latitude 0, longitude pi/2 points into positive x
+ direction.
+
+ The size of the data window should be 2*N by N pixels (width by height),
+ where N can be any integer greater than 0.
+ -------------------------------------------------------------------------
+"""
+)
+{
+ float guideRadius = 100000 (
+ displayGroup = "Guides"
+ displayName = "Radius"
+ doc = "The radius of guide geometry to use to visualize the dome light. The default is 1 km for scenes whose metersPerUnit is the USD default of 0.01 (i.e., 1 world unit is 1 cm)."
+ )
+ asset inputs:texture:file (
+ displayGroup = "Basic"
+ displayName = "Color Map"
+ doc = """A color texture to use on the dome, such as an HDR (high
+ dynamic range) texture intended for IBL (image based lighting)."""
+ )
+ token inputs:texture:format = "automatic" (
+ allowedTokens = ["automatic", "latlong", "mirroredBall", "angular", "cubeMapVerticalCross"]
+ displayGroup = "Basic"
+ displayName = "Color Map Format"
+ doc = """Specifies the parameterization of the color map file.
+ Valid values are:
+ - automatic: Tries to determine the layout from the file itself.
+ For example, Renderman texture files embed an explicit
+ parameterization.
+ - latlong: Latitude as X, longitude as Y.
+ - mirroredBall: An image of the environment reflected in a
+ sphere, using an implicitly orthogonal projection.
+ - angular: Similar to mirroredBall but the radial dimension
+ is mapped linearly to the angle, providing better sampling
+ at the edges.
+ - cubeMapVerticalCross: A cube map with faces laid out as a
+ vertical cross.
+ """
+ )
+ uniform token light:shaderId = "DomeLight"
+ rel portals (
+ doc = "Optional portals to guide light sampling."
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
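Because this version of DomeLight always starts with its pole on +Y while stages may be Z-up, the library also defines an orientToStageUpAxis transform suffix (see the GLOBAL tokens in the schema.usda section further down). A hedged Python sketch of authoring a dome light; the texture path and radius are illustrative:

```
from pxr import Usd, UsdGeom, UsdLux, Sdf

stage = Usd.Stage.CreateInMemory()
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)

dome = UsdLux.DomeLight.Define(stage, "/World/Lights/Env")
dome.CreateTextureFileAttr(Sdf.AssetPath("env/studio.exr"))   # inputs:texture:file
dome.CreateTextureFormatAttr(UsdLux.Tokens.latlong)           # inputs:texture:format
dome.CreateGuideRadiusAttr(100000.0)                          # guideRadius

# Adds an xformOp (if needed) so the dome's pole follows the stage's up axis.
dome.OrientToStageUpAxis()
```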
+class DomeLight_1 "DomeLight_1" (
+ apiSchemas = ["LightAPI"]
+ customData = {
+ token[] apiSchemaOverridePropertyNames = ["light:shaderId"]
+ }
+ doc = """Light emitted inward from a distant external environment,
+ such as a sky or IBL light probe.
+
+ In this version of the dome light, the dome's default orientation is
+ determined by its *poleAxis* property. The fallback value, \"scene\", means
+ that the dome starts with its top pole aligned with the stage's up axis.
+
+ Note that the rotation necessary to align the dome light with its *poleAxis*
+ is intended to be applied by a renderer to only the dome itself, and *not*
+ to inherit down to any USD namespace children of the dome light prim.
+
+ If *poleAxis* is set to \"Y\" or \"scene\" and the stage's up axis is \"Y\", the
+ dome's default orientation will adhere to the OpenEXR specification for
+ latlong environment maps. From the OpenEXR documentation:
+
+ -------------------------------------------------------------------------
+ Latitude-Longitude Map:
+
+ The environment is projected onto the image using polar coordinates
+ (latitude and longitude). A pixel's x coordinate corresponds to
+ its longitude, and the y coordinate corresponds to its latitude.
+ Pixel (dataWindow.min.x, dataWindow.min.y) has latitude +pi/2 and
+ longitude +pi; pixel (dataWindow.max.x, dataWindow.max.y) has
+ latitude -pi/2 and longitude -pi.
+
+ In 3D space, latitudes -pi/2 and +pi/2 correspond to the negative and
+ positive y direction. Latitude 0, longitude 0 points into positive
+ z direction; and latitude 0, longitude pi/2 points into positive x
+ direction.
+
+ The size of the data window should be 2*N by N pixels (width by height),
+ where N can be any integer greater than 0.
+ -------------------------------------------------------------------------
+
+ If *poleAxis* is set to \"Z\" or \"scene\" and the stage's up axis is \"Z\",
+ latitudes -pi/2 and +pi/2 will instead correspond to the negative and
+ positive Z direction, and latitude 0, longitude 0 will instead point into
+ the negative Y direction in 3D space.
+"""
+)
+{
+ float guideRadius = 100000 (
+ displayGroup = "Guides"
+ displayName = "Radius"
+ doc = "The radius of guide geometry to use to visualize the dome light. The default is 1 km for scenes whose metersPerUnit is the USD default of 0.01 (i.e., 1 world unit is 1 cm)."
+ )
+ asset inputs:texture:file (
+ displayGroup = "Basic"
+ displayName = "Color Map"
+ doc = """A color texture to use on the dome, such as an HDR (high
+ dynamic range) texture intended for IBL (image based lighting)."""
+ )
+ token inputs:texture:format = "automatic" (
+ allowedTokens = ["automatic", "latlong", "mirroredBall", "angular", "cubeMapVerticalCross"]
+ displayGroup = "Basic"
+ displayName = "Color Map Format"
+ doc = """Specifies the parameterization of the color map file.
+ Valid values are:
+ - automatic: Tries to determine the layout from the file itself.
+ For example, Renderman texture files embed an explicit
+ parameterization.
+ - latlong: Latitude as X, longitude as Y.
+ - mirroredBall: An image of the environment reflected in a
+ sphere, using an implicitly orthogonal projection.
+ - angular: Similar to mirroredBall but the radial dimension
+ is mapped linearly to the angle, providing better sampling
+ at the edges.
+ - cubeMapVerticalCross: A cube map with faces laid out as a
+ vertical cross.
+ """
+ )
+ uniform token light:shaderId = "DomeLight"
+ uniform token poleAxis = "scene" (
+ allowedTokens = ["scene", "Y", "Z"]
+ displayGroup = "Advanced"
+ displayName = "Pole Axis"
+ doc = """A token which indicates the starting alignment of the dome
+ light's top pole. This alignment is for the dome itself and is *not*
+ inherited by the namespace children of the dome.
+ Valid values are:
+ - scene: The dome light's top pole is aligned with the stage's up axis.
+ - Y: The dome light's top pole is aligned with the +Y axis.
+ - Z: The dome light's top pole is aligned with the +Z axis.
+ """
+ )
+ rel portals (
+ doc = "Optional portals to guide light sampling."
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class PortalLight "PortalLight" (
+ apiSchemas = ["LightAPI"]
+ customData = {
+ token[] apiSchemaOverridePropertyNames = ["light:shaderId"]
+ }
+ doc = """A rectangular portal in the local XY plane that guides sampling
+ of a dome light. Transmits light in the -Z direction.
+ The rectangle is 1 unit in length."""
+)
+{
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ float inputs:height = 1 (
+ displayGroup = "Geometry"
+ displayName = "Height"
+ doc = "Height of the portal rectangle in the local Y axis."
+ )
+ float inputs:width = 1 (
+ displayGroup = "Geometry"
+ displayName = "Width"
+ doc = "Width of the portal rectangle in the local X axis."
+ )
+ uniform token light:shaderId = "PortalLight"
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class PluginLight "PluginLight" (
+ apiSchemas = ["NodeDefAPI", "LightAPI"]
+ doc = """Light that provides properties that allow it to identify an
+ external SdrShadingNode definition, through UsdShadeNodeDefAPI, that can be
+ provided to render delegates without the need to provide a schema
+ definition for the light's type.
+
+ \\see \\ref usdLux_PluginSchemas
+"""
+)
+{
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class PluginLightFilter "PluginLightFilter" (
+ apiSchemas = ["NodeDefAPI", "CollectionAPI:filterLink"]
+ customData = {
+ token[] apiSchemaOverridePropertyNames = ["collection:filterLink:includeRoot"]
+ }
+ doc = """Light filter that provides properties that allow it to identify an
+ external SdrShadingNode definition, through UsdShadeNodeDefAPI, that can be
+ provided to render delegates without the need to provide a schema
+ definition for the light filter's type.
+
+ \\see \\ref usdLux_PluginSchemas
+"""
+)
+{
+ uniform bool collection:filterLink:includeRoot = 1
+ uniform token lightFilter:shaderId = "" (
+ displayGroup = "Internal"
+ doc = """Default ID for the light filter's shader.
+ This defines the shader ID for this light filter when a render context
+ specific shader ID is not available.
+
+ \\see GetShaderId
+ \\see GetShaderIdAttrForRenderContext
+ \\see SdrRegistry::GetShaderNodeByIdentifier
+ \\see SdrRegistry::GetShaderNodeByIdentifierAndType
+ """
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+ See for more detail about how
+ purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+# Portions of this file auto-generated by usdGenSchema.
+# Edits will survive regeneration except for comments and
+# changes to types with autoGenerated=true.
+{
+ "Plugins": [
+ {
+ "Info": {
+ "Types": {
+ "UsdLuxBoundableLightBase": {
+ "alias": {
+ "UsdSchemaBase": "BoundableLightBase"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomBoundable"
+ ],
+ "schemaKind": "abstractTyped"
+ },
+ "UsdLuxCylinderLight": {
+ "alias": {
+ "UsdSchemaBase": "CylinderLight"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdLuxBoundableLightBase"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdLuxDiskLight": {
+ "alias": {
+ "UsdSchemaBase": "DiskLight"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdLuxBoundableLightBase"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdLuxDistantLight": {
+ "alias": {
+ "UsdSchemaBase": "DistantLight"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdLuxNonboundableLightBase"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdLuxDomeLight": {
+ "alias": {
+ "UsdSchemaBase": "DomeLight"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdLuxNonboundableLightBase"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdLuxDomeLight_1": {
+ "alias": {
+ "UsdSchemaBase": "DomeLight_1"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdLuxNonboundableLightBase"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdLuxGeometryLight": {
+ "alias": {
+ "UsdSchemaBase": "GeometryLight"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdLuxNonboundableLightBase"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdLuxLightAPI": {
+ "alias": {
+ "UsdSchemaBase": "LightAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "providesUsdShadeConnectableAPIBehavior": true,
+ "schemaKind": "singleApplyAPI"
+ },
+ "UsdLuxLightFilter": {
+ "alias": {
+ "UsdSchemaBase": "LightFilter"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomXformable"
+ ],
+ "providesUsdShadeConnectableAPIBehavior": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdLuxLightListAPI": {
+ "alias": {
+ "UsdSchemaBase": "LightListAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "singleApplyAPI"
+ },
+ "UsdLuxListAPI": {
+ "alias": {
+ "UsdSchemaBase": "ListAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "singleApplyAPI"
+ },
+ "UsdLuxMeshLightAPI": {
+ "alias": {
+ "UsdSchemaBase": "MeshLightAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "singleApplyAPI"
+ },
+ "UsdLuxNonboundableLightBase": {
+ "alias": {
+ "UsdSchemaBase": "NonboundableLightBase"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomXformable"
+ ],
+ "schemaKind": "abstractTyped"
+ },
+ "UsdLuxPluginLight": {
+ "alias": {
+ "UsdSchemaBase": "PluginLight"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomXformable"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdLuxPluginLightFilter": {
+ "alias": {
+ "UsdSchemaBase": "PluginLightFilter"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdLuxLightFilter"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdLuxPortalLight": {
+ "alias": {
+ "UsdSchemaBase": "PortalLight"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdLuxBoundableLightBase"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdLuxRectLight": {
+ "alias": {
+ "UsdSchemaBase": "RectLight"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdLuxBoundableLightBase"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdLuxShadowAPI": {
+ "alias": {
+ "UsdSchemaBase": "ShadowAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "singleApplyAPI"
+ },
+ "UsdLuxShapingAPI": {
+ "alias": {
+ "UsdSchemaBase": "ShapingAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "singleApplyAPI"
+ },
+ "UsdLuxSphereLight": {
+ "alias": {
+ "UsdSchemaBase": "SphereLight"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdLuxBoundableLightBase"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdLuxVolumeLightAPI": {
+ "alias": {
+ "UsdSchemaBase": "VolumeLightAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "singleApplyAPI"
+ },
+ "UsdLux_DiscoveryPlugin": {
+ "bases": [
+ "NdrDiscoveryPlugin"
+ ]
+ },
+ "UsdLux_LightDefParserPlugin": {
+ "bases": [
+ "NdrParserPlugin"
+ ]
+ }
+ }
+ },
+ "LibraryPath": "",
+ "Name": "usdLux",
+ "ResourcePath": "resources",
+ "Root": "..",
+ "Type": "library"
+ }
+ ]
+}
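The plugInfo.json above is what registers these UsdLux types with USD's plugin system. As a rough, hedged sketch of inspecting that registration from Python (assuming the usdLux plugin is discoverable on the plugin path; the exact metadata layout may vary by USD version):

```
from pxr import Plug

plugin = Plug.Registry().GetPluginWithName("usdLux")
if plugin:
    print(plugin.name)           # "usdLux"
    print(plugin.resourcePath)   # location of the plugin's resources
    # The "Types" dictionary mirrors the entries listed above.
    print(sorted(plugin.metadata.get("Types", {}).keys()))
```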
+#usda 1.0
+(
+ "This file describes the USD Lux light schemata for code generation."
+ subLayers = [
+ @usdGeom/schema.usda@
+ ]
+)
+
+over "GLOBAL" (
+ customData = {
+ string libraryName = "usdLux"
+ string libraryPath = "pxr/usd/usdLux"
+ dictionary libraryTokens = {
+ dictionary lightLink = {
+ string doc = """
+ This token represents the collection name to use
+ with UsdCollectionAPI to represent light-linking
+ of a prim with an applied UsdLuxLightAPI.
+ """
+ }
+ dictionary shadowLink = {
+ string doc = """
+ This token represents the collection name to use
+ with UsdCollectionAPI to represent shadow-linking
+ of a prim with an applied UsdLuxLightAPI.
+ """
+ }
+ dictionary filterLink = {
+ string doc = """
+ This token represents the collection name to use
+ with UsdCollectionAPI to represent filter-linking
+ of a UsdLuxLightFilter prim.
+ """
+ }
+ dictionary orientToStageUpAxis = {
+ string doc = """
+ This token represents the suffix for a UsdGeomXformOp
+ used to orient a light with the stage's up axis.
+ """
+ }
+ }
+ }
+)
+{
+}
+
+class "LightAPI" (
+    inherits = </APISchemaBase>
+ doc = """API schema that imparts the quality of being a light onto a prim.
+
+ A light is any prim that has this schema applied to it. This is true
+ regardless of whether LightAPI is included as a built-in API of the prim
+ type (e.g. RectLight or DistantLight) or is applied directly to a Gprim
+ that should be treated as a light.
+
+ Linking
+
+ Lights can be linked to geometry. Linking controls which geometry
+ a light illuminates, and which geometry casts shadows from the light.
+
+ Linking is specified as collections (UsdCollectionAPI) which can
+ be accessed via GetLightLinkCollection() and GetShadowLinkCollection().
+ Note that these collections have their includeRoot set to true,
+ so that lights will illuminate and cast shadows from all objects
+ by default. To illuminate only a specific set of objects, there
+ are two options. One option is to modify the collection paths
+ to explicitly exclude everything else, assuming it is known;
+ the other option is to set includeRoot to false and explicitly
+ include the desired objects. These are complementary approaches
+ that may each be preferable depending on the scenario and how
+ to best express the intent of the light setup.
+ """
+ customData = {
+ dictionary extraPlugInfo = {
+ bool providesUsdShadeConnectableAPIBehavior = 1
+ }
+ string extraIncludes = """
+#include "pxr/usd/usd/collectionAPI.h"
+#include "pxr/usd/usdShade/input.h"
+#include "pxr/usd/usdShade/output.h" """
+ }
+ prepend apiSchemas = ["CollectionAPI:lightLink", "CollectionAPI:shadowLink"]
+) {
+ uniform bool collection:lightLink:includeRoot = 1 (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+ uniform bool collection:shadowLink:includeRoot = 1 (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+ uniform token light:shaderId = "" (
+ displayGroup = "Internal"
+ doc = """Default ID for the light's shader.
+ This defines the shader ID for this light when a render context specific
+ shader ID is not available.
+
+ The default shaderId for the intrinsic UsdLux lights (RectLight,
+ DistantLight, etc.) are set to default to the light's type name. For
+ each intrinsic UsdLux light, we will always register an SdrShaderNode in
+ the SdrRegistry, with the identifier matching the type name and the
+ source type "USD", that corresponds to the light's inputs.
+ \\see GetShaderId
+ \\see GetShaderIdAttrForRenderContext
+ \\see SdrRegistry::GetShaderNodeByIdentifier
+ \\see SdrRegistry::GetShaderNodeByIdentifierAndType
+ """
+ customData = {
+ token apiName = "shaderId"
+ }
+ )
+ uniform token light:materialSyncMode = "noMaterialResponse" (
+ displayGroup = "Geometry"
+ displayName = "Material Sync Mode"
+ doc = """For a LightAPI applied to geometry that has a bound Material,
+ which is entirely or partly emissive, this specifies the relationship
+ of the Material response to the lighting response.
+ Valid values are:
+ - materialGlowTintsLight: All primary and secondary rays see the
+ emissive/glow response as dictated by the bound Material while the
+ base color seen by light rays (which is then modulated by all of the
+ other LightAPI controls) is the multiplication of the color feeding
+ the emission/glow input of the Material (i.e. its surface or volume
+ shader) with the scalar or pattern input to *inputs:color*.
+ This allows the light's color to tint the geometry's glow color while
+ preserving access to intensity and other light controls as ways to
+ further modulate the illumination.
+ - independent: All primary and secondary rays see the emissive/glow
+ response as dictated by the bound Material, while the base color seen
+ by light rays is determined solely by *inputs:color*. Note that for
+ partially emissive geometry (in which some parts are reflective
+ rather than emissive), a suitable pattern must be connected to the
+ light's color input, or else the light will radiate uniformly from
+ the geometry.
+ - noMaterialResponse: The geometry behaves as if there is no Material
+ bound at all, i.e. there is no diffuse, specular, or transmissive
+ response. The base color of light rays is entirely controlled by the
+ *inputs:color*. This is the standard mode for "canonical" lights in
+ UsdLux and indicates to renderers that a Material will either never
+ be bound or can always be ignored.
+ """
+ allowedTokens = ["materialGlowTintsLight",
+ "independent",
+ "noMaterialResponse"]
+ customData = {
+ token apiName = "materialSyncMode"
+ }
+ )
+ float inputs:intensity = 1 (
+ displayGroup = "Basic"
+ displayName = "Intensity"
+ doc = """Scales the power of the light linearly."""
+ customData = {
+ token apiName = "intensity"
+ }
+ )
+ float inputs:exposure = 0 (
+ displayGroup = "Basic"
+ displayName = "Exposure"
+ doc = """Scales the power of the light exponentially as a power
+ of 2 (similar to an F-stop control over exposure). The result
+ is multiplied against the intensity."""
+ customData = {
+ token apiName = "exposure"
+ }
+ )
+ float inputs:diffuse = 1.0 (
+ displayGroup = "Refine"
+ displayName = "Diffuse Multiplier"
+ doc = """A multiplier for the effect of this light on the diffuse
+ response of materials. This is a non-physical control."""
+ customData = {
+ token apiName = "diffuse"
+ }
+ )
+ float inputs:specular = 1.0 (
+ displayGroup = "Refine"
+ displayName = "Specular Multiplier"
+ doc = """A multiplier for the effect of this light on the specular
+ response of materials. This is a non-physical control."""
+ customData = {
+ token apiName = "specular"
+ }
+ )
+ bool inputs:normalize = false (
+ displayGroup = "Advanced"
+ displayName = "Normalize Power"
+ doc = """Normalizes power by the surface area of the light.
+ This makes it easier to independently adjust the power and shape
+ of the light, by causing the power to not vary with the area or
+ angular size of the light."""
+ customData = {
+ token apiName = "normalize"
+ }
+ )
+ color3f inputs:color = (1, 1, 1) (
+ displayGroup = "Basic"
+ displayName = "Color"
+ doc = """The color of emitted light, in energy-linear terms."""
+ customData = {
+ token apiName = "color"
+ }
+ )
+ bool inputs:enableColorTemperature = false (
+ displayGroup = "Basic"
+ displayName = "Enable Color Temperature"
+ doc = """Enables using colorTemperature."""
+ customData = {
+ token apiName = "enableColorTemperature"
+ }
+ )
+ float inputs:colorTemperature = 6500 (
+ displayGroup = "Basic"
+ displayName = "Color Temperature"
+ doc = """Color temperature, in degrees Kelvin, representing the
+ white point. The default is a common white point, D65. Lower
+ values are warmer and higher values are cooler. The valid range
+ is from 1000 to 10000. Only takes effect when
+ enableColorTemperature is set to true. When active, the
+ computed result multiplies against the color attribute.
+ See UsdLuxBlackbodyTemperatureAsRgb()."""
+ customData = {
+ token apiName = "colorTemperature"
+ }
+ )
+ rel light:filters (
+ doc = """Relationship to the light filters that apply to this light."""
+ customData = {
+ token apiName = "filters"
+ }
+ )
+}
+
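A hedged Python sketch of the workflow the LightAPI doc above describes: applying the schema directly to a Gprim and adjusting the lightLink collection. Prim paths and values are illustrative:

```
from pxr import Usd, UsdGeom, UsdLux, Gf, Sdf

stage = Usd.Stage.CreateInMemory()
panel = UsdGeom.Mesh.Define(stage, "/World/EmissivePanel")

# Any prim with LightAPI applied is treated as a light, regardless of type.
light = UsdLux.LightAPI.Apply(panel.GetPrim())
light.CreateIntensityAttr(10.0)
light.CreateExposureAttr(1.5)
light.CreateColorAttr(Gf.Vec3f(1.0, 0.9, 0.8))
light.CreateEnableColorTemperatureAttr(True)
light.CreateColorTemperatureAttr(4500.0)

# Light linking: keep the illuminate-everything default (includeRoot = true)
# but carve one prim out of the lightLink collection.
link = light.GetLightLinkCollectionAPI()
link.CreateExcludesRel().AddTarget(Sdf.Path("/World/Props/DoNotLight"))
```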
+class "MeshLightAPI" (
+    inherits = </APISchemaBase>
+ doc = """This is the preferred API schema to apply to
+ \\ref UsdGeomMesh "Mesh" type prims when adding light behaviors to a mesh.
+ At its base, this API schema has the built-in behavior of applying LightAPI
+ to the mesh and overriding the default materialSyncMode to allow the
+ emission/glow of the bound material to affect the color of the light.
+ But, it additionally serves as a hook for plugins to attach additional
+ properties to "mesh lights" through the creation of API schemas which are
+ authored to auto-apply to MeshLightAPI.
+ \\see \\ref Usd_AutoAppliedAPISchemas
+ """
+ prepend apiSchemas = ["LightAPI"]
+) {
+ uniform token light:shaderId = "MeshLight" (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+ uniform token light:materialSyncMode = "materialGlowTintsLight" (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+}
+
+class "VolumeLightAPI" (
+    inherits = </APISchemaBase>
+ doc = """This is the preferred API schema to apply to
+ \\ref UsdVolVolume "Volume" type prims when adding light behaviors to a
+ volume. At its base, this API schema has the built-in behavior of applying
+ LightAPI to the volume and overriding the default materialSyncMode to allow
+ the emission/glow of the bound material to affect the color of the light.
+ But, it additionally serves as a hook for plugins to attach additional
+ properties to "volume lights" through the creation of API schemas which are
+ authored to auto-apply to VolumeLightAPI.
+ \\see \\ref Usd_AutoAppliedAPISchemas
+ """
+ prepend apiSchemas = ["LightAPI"]
+) {
+ uniform token light:shaderId = "VolumeLight" (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+ uniform token light:materialSyncMode = "materialGlowTintsLight" (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+}
+
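A short sketch of how MeshLightAPI (and, identically, VolumeLightAPI) is applied in practice, assuming a USD build that ships these schemas; the prim path is illustrative:

```
from pxr import Usd, UsdGeom, UsdLux

stage = Usd.Stage.CreateInMemory()
mesh = UsdGeom.Mesh.Define(stage, "/World/NeonSign")

# Applying MeshLightAPI pulls in LightAPI as a built-in and overrides the
# materialSyncMode fallback to "materialGlowTintsLight", per the schema above.
UsdLux.MeshLightAPI.Apply(mesh.GetPrim())

light = UsdLux.LightAPI(mesh.GetPrim())
print(light.GetMaterialSyncModeAttr().Get())   # expected: materialGlowTintsLight
```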
+class "LightListAPI" (
+    inherits = </APISchemaBase>
+ doc = """API schema to support discovery and publishing of lights in a scene.
+
+ \\section UsdLuxLightListAPI_Discovery Discovering Lights via Traversal
+
+ To motivate this API, consider what is required to discover all
+ lights in a scene. We must load all payloads and traverse all prims:
+
+ \\code
+ 01 // Load everything on the stage so we can find all lights,
+ 02 // including those inside payloads
+ 03 stage->Load();
+ 04
+ 05 // Traverse all prims, checking if they have an applied UsdLuxLightAPI
+ 06 // (Note: ignoring instancing and a few other things for simplicity)
+ 07 SdfPathVector lights;
+ 08 for (UsdPrim prim: stage->Traverse()) {
+ 09 if (prim.HasAPI()) {
+ 10 lights.push_back(i->GetPath());
+ 11 }
+ 12 }
+ \\endcode
+
+ This traversal -- suitably elaborated to handle certain details --
+ is the first and simplest thing UsdLuxLightListAPI provides.
+ UsdLuxLightListAPI::ComputeLightList() performs this traversal and returns
+ all lights in the scene:
+
+ \\code
+ 01 UsdLuxLightListAPI listAPI(stage->GetPseudoRoot());
+ 02 SdfPathVector lights = listAPI.ComputeLightList();
+ \\endcode
+
+ \\section UsdLuxLightListAPI_LightList Publishing a Cached Light List
+
+ Consider a USD client that needs to quickly discover lights but
+ wants to defer loading payloads and traversing the entire scene
+ where possible, and is willing to do up-front computation and
+ caching to achieve that.
+
+ UsdLuxLightListAPI provides a way to cache the computed light list,
+ by publishing the list of lights onto prims in the model
+ hierarchy. Consider a big set that contains lights:
+
+ \\code
+ 01 def Xform "BigSetWithLights" (
+ 02 kind = "assembly"
+ 03 payload = @BigSetWithLights.usd@ // Heavy payload
+ 04 ) {
+ 05 // Pre-computed, cached list of lights inside payload
+ 06 rel lightList = [
+ 07 <./Lights/light_1>,
+ 08 <./Lights/light_2>,
+ 09 ...
+ 10 ]
+ 11 token lightList:cacheBehavior = "consumeAndContinue";
+ 12 }
+ \\endcode
+
+ The lightList relationship encodes a set of lights, and the
+ lightList:cacheBehavior property provides fine-grained
+ control over how to use that cache. (See details below.)
+
+ The cache can be created by first invoking
+ ComputeLightList(ComputeModeIgnoreCache) to pre-compute the list
+ and then storing the result with UsdLuxLightListAPI::StoreLightList().
+
+ To enable efficient retrieval of the cache, it should be stored
+ on a model hierarchy prim. Furthermore, note that while you can
+ use a UsdLuxLightListAPI bound to the pseudo-root prim to query the
+ lights (as in the example above) because it will perform a
+ traversal over descendants, you cannot store the cache back to the
+    pseudo-root prim.
+
+ To consult the cached list, we invoke
+ ComputeLightList(ComputeModeConsultModelHierarchyCache):
+
+ \\code
+ 01 // Find and load all lights, using lightList cache where available
+ 02 UsdLuxLightListAPI list(stage->GetPseudoRoot());
+ 03 SdfPathSet lights = list.ComputeLightList(
+ 04 UsdLuxLightListAPI::ComputeModeConsultModelHierarchyCache);
+ 05 stage.LoadAndUnload(lights, SdfPathSet());
+ \\endcode
+
+ In this mode, ComputeLightList() will traverse the model
+ hierarchy, accumulating cached light lists.
+
+ \\section UsdLuxLightListAPI_CacheBehavior Controlling Cache Behavior
+
+ The lightList:cacheBehavior property gives additional fine-grained
+ control over cache behavior:
+
+ \\li The fallback value, "ignore", indicates that the lightList should
+ be disregarded. This provides a way to invalidate cache entries.
+ Note that unless "ignore" is specified, a lightList with an empty
+ list of targets is considered a cache indicating that no lights
+ are present.
+
+ \\li The value "consumeAndContinue" indicates that the cache should
+ be consulted to contribute lights to the scene, and that recursion
+ should continue down the model hierarchy in case additional lights
+    are added as descendants. This is the default value established when
+ StoreLightList() is invoked. This behavior allows the lights within
+ a large model, such as the BigSetWithLights example above, to be
+ published outside the payload, while also allowing referencing and
+ layering to add additional lights over that set.
+
+ \\li The value "consumeAndHalt" provides a way to terminate recursive
+ traversal of the scene for light discovery. The cache will be
+ consulted but no descendant prims will be examined.
+
+ \\section UsdLuxLightListAPI_Instancing Instancing
+
+ Where instances are present, UsdLuxLightListAPI::ComputeLightList() will
+ return the instance-unique paths to any lights discovered within
+ those instances. Lights within a UsdGeomPointInstancer will
+ not be returned, however, since they cannot be referred to
+ solely via paths.
+"""
+) {
+ rel lightList (
+ doc = """Relationship to lights in the scene."""
+ )
+ token lightList:cacheBehavior (
+ doc = """Controls how the lightList should be interpreted.
+ Valid values are:
+ - consumeAndHalt: The lightList should be consulted,
+ and if it exists, treated as a final authoritative statement
+ of any lights that exist at or below this prim, halting
+ recursive discovery of lights.
+ - consumeAndContinue: The lightList should be consulted,
+ but recursive traversal over nameChildren should continue
+ in case additional lights are added by descendants.
+ - ignore: The lightList should be entirely ignored. This
+ provides a simple way to temporarily invalidate an existing
+ cache. This is the fallback behavior.
+ """
+ allowedTokens = ["consumeAndHalt", "consumeAndContinue", "ignore"]
+ )
+}
+
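The C++ snippets embedded in the LightListAPI doc above translate directly to Python. A hedged sketch, with an illustrative shot file and set prim path:

```
from pxr import Usd, UsdLux

stage = Usd.Stage.Open("shot.usda")                     # illustrative path
root = UsdLux.LightListAPI(stage.GetPseudoRoot())

# Full traversal, ignoring any published caches.
lights = root.ComputeLightList(UsdLux.LightListAPI.ComputeModeIgnoreCache)

# Publish the cache on a model-hierarchy prim (not the pseudo-root). This also
# authors lightList:cacheBehavior = "consumeAndContinue".
setPrim = stage.GetPrimAtPath("/World/BigSetWithLights")  # illustrative path
UsdLux.LightListAPI.Apply(setPrim).StoreLightList(lights)

# Later consumers can prefer cached lists where they exist.
cached = root.ComputeLightList(
    UsdLux.LightListAPI.ComputeModeConsultModelHierarchyCache)
```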
+class "ListAPI" (
+    inherits = </APISchemaBase>
+ doc = """
+ \\deprecated
+ Use LightListAPI instead
+"""
+) {
+ rel lightList (
+ doc = """Relationship to lights in the scene."""
+ )
+ token lightList:cacheBehavior (
+ doc = """Controls how the lightList should be interpreted.
+ Valid values are:
+ - consumeAndHalt: The lightList should be consulted,
+ and if it exists, treated as a final authoritative statement
+ of any lights that exist at or below this prim, halting
+ recursive discovery of lights.
+ - consumeAndContinue: The lightList should be consulted,
+ but recursive traversal over nameChildren should continue
+ in case additional lights are added by descendants.
+ - ignore: The lightList should be entirely ignored. This
+ provides a simple way to temporarily invalidate an existing
+ cache. This is the fallback behavior.
+ """
+ allowedTokens = ["consumeAndHalt", "consumeAndContinue", "ignore"]
+ )
+}
+
+class "ShapingAPI" (
+    inherits = </APISchemaBase>
+ doc = """Controls for shaping a light's emission."""
+ customData = {
+ string extraIncludes = """
+#include "pxr/usd/usdShade/input.h"
+#include "pxr/usd/usdShade/output.h" """
+ }
+
+) {
+ float inputs:shaping:focus = 0 (
+ displayGroup = "Shaping"
+ displayName = "Emission Focus"
+ doc = """A control to shape the spread of light. Higher focus
+ values pull light towards the center and narrow the spread.
+ Implemented as an off-axis cosine power exponent.
+ TODO: clarify semantics"""
+ customData = {
+ token apiName = "shaping:focus"
+ }
+ )
+ color3f inputs:shaping:focusTint = (0, 0, 0) (
+ displayGroup = "Shaping"
+ displayName = "Emission Focus Tint"
+ doc = """Off-axis color tint. This tints the emission in the
+ falloff region. The default tint is black.
+ TODO: clarify semantics"""
+ customData = {
+ token apiName = "shaping:focusTint"
+ }
+ )
+ float inputs:shaping:cone:angle = 90 (
+ displayGroup = "Shaping"
+ displayName = "Cone Angle"
+ doc = """Angular limit off the primary axis to restrict the
+ light spread."""
+ customData = {
+ token apiName = "shaping:cone:angle"
+ }
+ )
+ float inputs:shaping:cone:softness = 0 (
+ displayGroup = "Shaping"
+ displayName = "Cone Softness"
+ doc = """Controls the cutoff softness for cone angle.
+ TODO: clarify semantics"""
+ customData = {
+ token apiName = "shaping:cone:softness"
+ }
+ )
+ asset inputs:shaping:ies:file (
+ displayGroup = "Shaping"
+ displayName = "IES Profile"
+ doc = """An IES (Illumination Engineering Society) light
+ profile describing the angular distribution of light."""
+ customData = {
+ token apiName = "shaping:ies:file"
+ }
+ )
+ float inputs:shaping:ies:angleScale = 0 (
+ displayGroup = "Shaping"
+ displayName = "Profile Scale"
+ doc = """Rescales the angular distribution of the IES profile.
+ TODO: clarify semantics"""
+ customData = {
+ token apiName = "shaping:ies:angleScale"
+ }
+ )
+ bool inputs:shaping:ies:normalize = false (
+ displayGroup = "Shaping"
+ displayName = "Profile Normalization"
+ doc = """Normalizes the IES profile so that it affects the shaping
+ of the light while preserving the overall energy output."""
+ customData = {
+ token apiName = "shaping:ies:normalize"
+ }
+ )
+}
+
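A hedged sketch of applying ShapingAPI to narrow a light into a spot-like cone; the IES profile path and values are illustrative:

```
from pxr import Usd, UsdLux, Sdf

stage = Usd.Stage.CreateInMemory()
spot = UsdLux.SphereLight.Define(stage, "/World/Lights/Spot")

shaping = UsdLux.ShapingAPI.Apply(spot.GetPrim())
shaping.CreateShapingConeAngleAttr(25.0)       # inputs:shaping:cone:angle
shaping.CreateShapingConeSoftnessAttr(0.25)    # inputs:shaping:cone:softness
shaping.CreateShapingFocusAttr(2.0)            # inputs:shaping:focus
shaping.CreateShapingIesFileAttr(Sdf.AssetPath("profiles/spot.ies"))
shaping.CreateShapingIesNormalizeAttr(True)
```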
+class "ShadowAPI" (
+    inherits = </APISchemaBase>
+ doc = """Controls to refine a light's shadow behavior. These are
+ non-physical controls that are valuable for visual lighting work."""
+ customData = {
+ string extraIncludes = """
+#include "pxr/usd/usdShade/input.h"
+#include "pxr/usd/usdShade/output.h" """
+ }
+
+) {
+ bool inputs:shadow:enable = true (
+ displayGroup = "Shadows"
+ displayName = "Enable Shadows"
+ doc = """Enables shadows to be cast by this light."""
+ customData = {
+ token apiName = "shadow:enable"
+ }
+ )
+ color3f inputs:shadow:color = (0, 0, 0) (
+ displayGroup = "Shadows"
+ displayName = "Shadow Color"
+ doc = """The color of shadows cast by the light. This is a
+ non-physical control. The default is to cast black shadows."""
+ customData = {
+ token apiName = "shadow:color"
+ }
+ )
+ float inputs:shadow:distance = -1.0 (
+ displayGroup = "Shadows"
+ displayName = "Shadow Max Distance"
+ doc = """The maximum distance shadows are cast. The distance is
+ measured as the distance between the point on the surface and the
+ occluder.
+ The default value (-1) indicates no limit.
+ """
+ customData = {
+ token apiName = "shadow:distance"
+ }
+ )
+ float inputs:shadow:falloff = -1.0 (
+ displayGroup = "Shadows"
+ displayName = "Shadow Falloff"
+ doc = """The size of the shadow falloff zone within the shadow max
+ distance, which can be used to hide the hard cut-off for shadows seen
+ stretching past the max distance. The falloff zone is the area that
+ fades from full shadowing at the beginning of the falloff zone to no
+ shadowing at the max distance from the occluder. The falloff zone
+ distance cannot exceed the shadow max distance. A falloff value equal
+ to or less than zero (with -1 as the default) indicates no falloff.
+ """
+ customData = {
+ token apiName = "shadow:falloff"
+ }
+ )
+ float inputs:shadow:falloffGamma = 1.0 (
+ displayGroup = "Shadows"
+ displayName = "Shadow Falloff Gamma"
+ doc = """A gamma (i.e., exponential) control over shadow strength
+ with linear distance within the falloff zone. This controls the rate
+ of the falloff.
+ This requires the use of shadowDistance and shadowFalloff."""
+ customData = {
+ token apiName = "shadow:falloffGamma"
+ }
+ )
+}
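
An illustrative sketch of the shadow controls, assuming the pxr Python bindings; the path and values are placeholders.

```
from pxr import Usd, UsdLux, Gf

stage = Usd.Stage.CreateInMemory()
key = UsdLux.SphereLight.Define(stage, "/World/Key")

shadow = UsdLux.ShadowAPI.Apply(key.GetPrim())
shadow.CreateShadowEnableAttr(True)
shadow.CreateShadowColorAttr(Gf.Vec3f(0.1, 0.0, 0.0))  # non-physical tinted shadows
shadow.CreateShadowDistanceAttr(50.0)                  # no shadowing past 50 units
shadow.CreateShadowFalloffAttr(10.0)                   # fade over the last 10 units
```
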
+
+class LightFilter "LightFilter" (
+ inherits = </Xformable>
+ doc = """A light filter modifies the effect of a light.
+ Lights refer to filters via relationships so that filters may be
+ shared.
+
+ Linking
+
+ Filters can be linked to geometry. Linking controls which geometry
+ a light-filter affects, when considering the light filters attached
+ to a light illuminating the geometry.
+
+ Linking is specified as a collection (UsdCollectionAPI) which can
+ be accessed via GetFilterLinkCollection().
+ """
+ customData = {
+ dictionary extraPlugInfo = {
+ bool providesUsdShadeConnectableAPIBehavior = 1
+ }
+ string extraIncludes = """
+#include "pxr/usd/usd/collectionAPI.h"
+#include "pxr/usd/usdShade/input.h"
+#include "pxr/usd/usdShade/output.h" """
+ }
+ prepend apiSchemas = ["CollectionAPI:filterLink"]
+) {
+ uniform bool collection:filterLink:includeRoot = 1 (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+ uniform token lightFilter:shaderId = "" (
+ displayGroup = "Internal"
+ doc = """Default ID for the light filter's shader.
+ This defines the shader ID for this light filter when a render context
+ specific shader ID is not available.
+
+ \\see GetShaderId
+ \\see GetShaderIdAttrForRenderContext
+ \\see SdrRegistry::GetShaderNodeByIdentifier
+ \\see SdrRegistry::GetShaderNodeByIdentifierAndType
+ """
+ customData = {
+ token apiName = "shaderId"
+ }
+ )
+
+}
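
An illustrative sketch of attaching a filter to a light and linking it to geometry. The filterLink collection is accessed here through the generic Usd.CollectionAPI rather than the accessor named in the doc string, the light's filters relationship is assumed to use the usual generated accessor (CreateFiltersRel), and all paths are placeholders.

```
from pxr import Usd, UsdLux

stage = Usd.Stage.CreateInMemory()
key = UsdLux.RectLight.Define(stage, "/World/Key")
blocker = UsdLux.LightFilter.Define(stage, "/World/Blocker")

# The light refers to the filter through its light:filters relationship.
UsdLux.LightAPI(key.GetPrim()).CreateFiltersRel().AddTarget(blocker.GetPath())

# Link the filter to geometry via its filterLink collection (generic access).
link = Usd.CollectionAPI(blocker.GetPrim(), "filterLink")
link.CreateIncludesRel().AddTarget("/World/Hero")
```
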
+
+class "BoundableLightBase" (
+ inherits = </Boundable>
+ doc = """Base class for intrinsic lights that are boundable.
+
+ The primary purpose of this class is to provide a direct API to the
+ functions provided by LightAPI for concrete derived light types.
+ """
+ customData = {
+ string extraIncludes = """#include "pxr/usd/usdLux/lightAPI.h" """
+ }
+ prepend apiSchemas = ["LightAPI"]
+) {
+}
+
+class "NonboundableLightBase" (
+ inherits = </Xformable>
+ doc = """Base class for intrinsic lights that are not boundable.
+
+ The primary purpose of this class is to provide a direct API to the
+ functions provided by LightAPI for concrete derived light types.
+ """
+ customData = {
+ string extraIncludes = """#include "pxr/usd/usdLux/lightAPI.h" """
+ }
+ prepend apiSchemas = ["LightAPI"]
+) {
+}
+
+class DistantLight "DistantLight" (
+ inherits = </NonboundableLightBase>
+ doc = """Light emitted from a distant source along the -Z axis.
+ Also known as a directional light."""
+) {
+ uniform token light:shaderId = "DistantLight" (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+ float inputs:angle = 0.53 (
+ displayGroup = "Basic"
+ displayName = "Angle Extent"
+ doc = """Angular size of the light in degrees.
+ As an example, the Sun is approximately 0.53 degrees as seen from Earth.
+ Higher values broaden the light and therefore soften shadow edges.
+ """
+ customData = {
+ token apiName = "angle"
+ }
+ )
+ float inputs:intensity = 50000 (
+ doc = """Scales the emission of the light linearly.
+ The DistantLight has a high default intensity to approximate the Sun."""
+ customData = {
+ token apiName = "intensity"
+ bool apiSchemaOverride = true
+ }
+ )
+}
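
A small, illustrative sketch of defining a DistantLight; the intensity is authored through the auto-applied LightAPI, and the path is a placeholder.

```
from pxr import Usd, UsdLux

stage = Usd.Stage.CreateInMemory()
sun = UsdLux.DistantLight.Define(stage, "/World/Sun")
sun.CreateAngleAttr(0.53)  # roughly the Sun's angular size in degrees

# intensity comes from the auto-applied LightAPI
UsdLux.LightAPI(sun.GetPrim()).CreateIntensityAttr(50000.0)
```
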
+
+class DiskLight "DiskLight" (
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = 1
+ }
+ }
+ inherits = </BoundableLightBase>
+ doc = """Light emitted from one side of a circular disk.
+ The disk is centered in the XY plane and emits light along the -Z axis."""
+) {
+ uniform token light:shaderId = "DiskLight" (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+ float inputs:radius = 0.5 (
+ displayGroup = "Geometry"
+ displayName = "Radius"
+ doc = "Radius of the disk."
+ customData = {
+ token apiName = "radius"
+ }
+ )
+}
+
+class RectLight "RectLight" (
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = 1
+ }
+ }
+ inherits = </BoundableLightBase>
+ doc = """Light emitted from one side of a rectangle.
+ The rectangle is centered in the XY plane and emits light along the -Z axis.
+ The rectangle is 1 unit in length in the X and Y axis. In the default
+ position, a texture file's min coordinates should be at (+X, +Y) and
+ max coordinates at (-X, -Y)."""
+) {
+ uniform token light:shaderId = "RectLight" (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+ float inputs:width = 1 (
+ displayGroup = "Geometry"
+ displayName = "Width"
+ doc = "Width of the rectangle, in the local X axis."
+ customData = {
+ token apiName = "width"
+ }
+
+ )
+ float inputs:height = 1 (
+ displayGroup = "Geometry"
+ displayName = "Height"
+ doc = "Height of the rectangle, in the local Y axis."
+ customData = {
+ token apiName = "height"
+ }
+ )
+ asset inputs:texture:file (
+ displayGroup = "Basic"
+ displayName = "Color Map"
+ doc = """A color texture to use on the rectangle."""
+ customData = {
+ token apiName = "textureFile"
+ }
+ )
+}
+
+class SphereLight "SphereLight" (
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = 1
+ }
+ }
+ inherits = </BoundableLightBase>
+ doc = """Light emitted outward from a sphere."""
+) {
+ uniform token light:shaderId = "SphereLight" (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+ float inputs:radius = 0.5 (
+ displayGroup = "Geometry"
+ displayName = "Radius"
+ doc = "Radius of the sphere."
+ customData = {
+ token apiName = "radius"
+ }
+ )
+ bool treatAsPoint = false (
+ displayGroup = "Advanced"
+ displayName = "Treat As Point"
+ doc = """A hint that this light can be treated as a 'point'
+ light (effectively, a zero-radius sphere) by renderers that
+ benefit from non-area lighting. Renderers that only support
+ area lights can disregard this."""
+ )
+}
+
+class CylinderLight "CylinderLight" (
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = 1
+ }
+ }
+ inherits = </BoundableLightBase>
+ doc = """Light emitted outward from a cylinder.
+ The cylinder is centered at the origin and has its major axis on the X axis.
+ The cylinder does not emit light from the flat end-caps.
+ """
+) {
+ uniform token light:shaderId = "CylinderLight" (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+ float inputs:length = 1 (
+ displayGroup = "Geometry"
+ displayName = "Length"
+ doc = "Length of the cylinder, in the local X axis."
+ customData = {
+ token apiName = "length"
+ }
+ )
+ float inputs:radius = 0.5 (
+ displayGroup = "Geometry"
+ displayName = "Radius"
+ doc = "Radius of the cylinder."
+ customData = {
+ token apiName = "radius"
+ }
+ )
+ bool treatAsLine = false (
+ displayGroup = "Advanced"
+ displayName = "Treat As Line"
+ doc = """A hint that this light can be treated as a 'line'
+ light (effectively, a zero-radius cylinder) by renderers that
+ benefit from non-area lighting. Renderers that only support
+ area lights can disregard this."""
+ )
+}
+
+class GeometryLight "GeometryLight" (
+ inherits = </NonboundableLightBase>
+ doc = """\\deprecated
+ Light emitted outward from a geometric prim (UsdGeomGprim),
+ which is typically a mesh."""
+) {
+ rel geometry (
+ doc = """Relationship to the geometry to use as the light source."""
+ )
+ uniform token light:shaderId = "GeometryLight" (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+}
+
+class DomeLight "DomeLight" (
+ inherits = </NonboundableLightBase>
+ doc = """Light emitted inward from a distant external environment,
+ such as a sky or IBL light probe.
+
+ In this version of the dome light, the dome's default orientation is such
+ that its top pole is aligned with the world's +Y axis. This adheres to the
+ OpenEXR specification for latlong environment maps. From the OpenEXR
+ documentation:
+
+ -------------------------------------------------------------------------
+ Latitude-Longitude Map:
+
+ The environment is projected onto the image using polar coordinates
+ (latitude and longitude). A pixel's x coordinate corresponds to
+ its longitude, and the y coordinate corresponds to its latitude.
+ Pixel (dataWindow.min.x, dataWindow.min.y) has latitude +pi/2 and
+ longitude +pi; pixel (dataWindow.max.x, dataWindow.max.y) has
+ latitude -pi/2 and longitude -pi.
+
+ In 3D space, latitudes -pi/2 and +pi/2 correspond to the negative and
+ positive y direction. Latitude 0, longitude 0 points into positive
+ z direction; and latitude 0, longitude pi/2 points into positive x
+ direction.
+
+ The size of the data window should be 2*N by N pixels (width by height),
+ where N can be any integer greater than 0.
+ -------------------------------------------------------------------------
+"""
+) {
+ uniform token light:shaderId = "DomeLight" (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+ asset inputs:texture:file (
+ displayGroup = "Basic"
+ displayName = "Color Map"
+ doc = """A color texture to use on the dome, such as an HDR (high
+ dynamic range) texture intended for IBL (image based lighting)."""
+ customData = {
+ token apiName = "textureFile"
+ }
+ )
+ token inputs:texture:format = "automatic" (
+ displayGroup = "Basic"
+ displayName = "Color Map Format"
+ allowedTokens = ["automatic", "latlong", "mirroredBall", "angular", "cubeMapVerticalCross"]
+ doc = """Specifies the parameterization of the color map file.
+ Valid values are:
+ - automatic: Tries to determine the layout from the file itself.
+ For example, Renderman texture files embed an explicit
+ parameterization.
+ - latlong: Latitude as X, longitude as Y.
+ - mirroredBall: An image of the environment reflected in a
+ sphere, using an implicitly orthogonal projection.
+ - angular: Similar to mirroredBall but the radial dimension
+ is mapped linearly to the angle, providing better sampling
+ at the edges.
+ - cubeMapVerticalCross: A cube map with faces laid out as a
+ vertical cross.
+ """
+ customData = {
+ token apiName = "textureFormat"
+ }
+ )
+ rel portals (
+ doc = """Optional portals to guide light sampling."""
+ )
+ float guideRadius = 1.0e5 (
+ displayGroup = "Guides"
+ displayName = "Radius"
+ doc = """The radius of guide geometry to use to visualize the dome light. The default is 1 km for scenes whose metersPerUnit is the USD default of 0.01 (i.e., 1 world unit is 1 cm)."""
+ )
+}
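
An illustrative sketch of a DomeLight with a latlong environment map; "studio.exr" is a placeholder asset path.

```
from pxr import Usd, UsdLux, Sdf

stage = Usd.Stage.CreateInMemory()
dome = UsdLux.DomeLight.Define(stage, "/World/Env")

dome.CreateTextureFileAttr(Sdf.AssetPath("studio.exr"))  # placeholder HDR path
dome.CreateTextureFormatAttr(UsdLux.Tokens.latlong)
```
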
+
+class DomeLight_1 "DomeLight_1" (
+ inherits = </NonboundableLightBase>
+ doc = """Light emitted inward from a distant external environment,
+ such as a sky or IBL light probe.
+
+ In this version of the dome light, the dome's default orientation is
+ determined by its *poleAxis* property. The fallback value, "scene", means
+ that the dome starts with its top pole aligned with the stage's up axis.
+
+ Note that the rotation necessary to align the dome light with its *poleAxis*
+ is intended to be applied by a renderer to only the dome itself, and *not*
+ to inherit down to any USD namespace children of the dome light prim.
+
+ If *poleAxis* is set to "Y" or "scene" and the stage's up axis is "Y", the
+ dome's default orientation will adhere to the OpenEXR specification for
+ latlong environment maps. From the OpenEXR documentation:
+
+ -------------------------------------------------------------------------
+ Latitude-Longitude Map:
+
+ The environment is projected onto the image using polar coordinates
+ (latitude and longitude). A pixel's x coordinate corresponds to
+ its longitude, and the y coordinate corresponds to its latitude.
+ Pixel (dataWindow.min.x, dataWindow.min.y) has latitude +pi/2 and
+ longitude +pi; pixel (dataWindow.max.x, dataWindow.max.y) has
+ latitude -pi/2 and longitude -pi.
+
+ In 3D space, latitudes -pi/2 and +pi/2 correspond to the negative and
+ positive y direction. Latitude 0, longitude 0 points into positive
+ z direction; and latitude 0, longitude pi/2 points into positive x
+ direction.
+
+ The size of the data window should be 2*N by N pixels (width by height),
+ where N can be any integer greater than 0.
+ -------------------------------------------------------------------------
+
+ If *poleAxis* is set to "Z" or "scene" and the stage's up axis is "Z",
+ latitudes -pi/2 and +pi/2 will instead correspond to the negative and
+ positive Z direction, and latitude 0, longitude 0 will instead point into
+ the negative Y direction in 3D space.
+"""
+) {
+ uniform token light:shaderId = "DomeLight" (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+ asset inputs:texture:file (
+ displayGroup = "Basic"
+ displayName = "Color Map"
+ doc = """A color texture to use on the dome, such as an HDR (high
+ dynamic range) texture intended for IBL (image based lighting)."""
+ customData = {
+ token apiName = "textureFile"
+ }
+ )
+ token inputs:texture:format = "automatic" (
+ displayGroup = "Basic"
+ displayName = "Color Map Format"
+ allowedTokens = ["automatic", "latlong", "mirroredBall", "angular", "cubeMapVerticalCross"]
+ doc = """Specifies the parameterization of the color map file.
+ Valid values are:
+ - automatic: Tries to determine the layout from the file itself.
+ For example, Renderman texture files embed an explicit
+ parameterization.
+ - latlong: Latitude as X, longitude as Y.
+ - mirroredBall: An image of the environment reflected in a
+ sphere, using an implicitly orthogonal projection.
+ - angular: Similar to mirroredBall but the radial dimension
+ is mapped linearly to the angle, providing better sampling
+ at the edges.
+ - cubeMapVerticalCross: A cube map with faces laid out as a
+ vertical cross.
+ """
+ customData = {
+ token apiName = "textureFormat"
+ }
+ )
+ rel portals (
+ doc = """Optional portals to guide light sampling."""
+ )
+ float guideRadius = 1.0e5 (
+ displayGroup = "Guides"
+ displayName = "Radius"
+ doc = """The radius of guide geometry to use to visualize the dome light. The default is 1 km for scenes whose metersPerUnit is the USD default of 0.01 (i.e., 1 world unit is 1 cm)."""
+ )
+ uniform token poleAxis = "scene" (
+ displayGroup = "Advanced"
+ displayName = "Pole Axis"
+ allowedTokens = ["scene", "Y", "Z"]
+ doc = """A token which indicates the starting alignment of the dome
+ light's top pole. This alignment is for the dome itself and is *not*
+ inherited by the namespace children of the dome.
+ Valid values are:
+ - scene: The dome light's top pole is aligned with the stage's up axis.
+ - Y: The dome light's top pole is aligned with the +Y axis.
+ - Z: The dome light's top pole is aligned with the +Z axis.
+ """
+ )
+}
+
+class PortalLight "PortalLight" (
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = 1
+ }
+ }
+ inherits = </BoundableLightBase>
+ doc = """A rectangular portal in the local XY plane that guides sampling
+ of a dome light. Transmits light in the -Z direction.
+ The rectangle is 1 unit in length."""
+) {
+ uniform token light:shaderId = "PortalLight" (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+ float inputs:width = 1 (
+ displayGroup = "Geometry"
+ displayName = "Width"
+ doc = "Width of the portal rectangle in the local X axis."
+ customData = {
+ token apiName = "width"
+ }
+
+ )
+ float inputs:height = 1 (
+ displayGroup = "Geometry"
+ displayName = "Height"
+ doc = "Height of the portal rectangle in the local Y axis."
+ customData = {
+ token apiName = "height"
+ }
+ )
+}
+
+class PluginLight "PluginLight" (
+ inherits = </Xformable>
+ doc = """Light that provides properties that allow it to identify an
+ external SdrShadingNode definition, through UsdShadeNodeDefAPI, that can be
+ provided to render delegates without the need to provide a schema
+ definition for the light's type.
+
+ \\see \\ref usdLux_PluginSchemas
+"""
+ prepend apiSchemas = ["NodeDefAPI", "LightAPI"]
+ customData = {
+ string extraIncludes = """
+#include "pxr/usd/usdShade/nodeDefAPI.h" """
+ }
+) {
+}
+
+class PluginLightFilter "PluginLightFilter" (
+ inherits = </LightFilter>
+ doc = """Light filter that provides properties that allow it to identify an
+ external SdrShadingNode definition, through UsdShadeNodeDefAPI, that can be
+ provided to render delegates without the need to provide a schema
+ definition for the light filter's type.
+
+ \\see \\ref usdLux_PluginSchemas
+"""
+ prepend apiSchemas = ["NodeDefAPI"]
+ customData = {
+ string extraIncludes = """
+#include "pxr/usd/usdShade/nodeDefAPI.h" """
+ }
+) {
+}
+#usda 1.0
+(
+ "WARNING: THIS FILE IS GENERATED BY usdGenSchema. DO NOT EDIT."
+)
+
+class "RenderSettingsBase" (
+ doc = """Abstract base class that defines render settings that
+ can be specified on either a RenderSettings prim or a RenderProduct
+ prim."""
+)
+{
+ uniform token aspectRatioConformPolicy = "expandAperture" (
+ allowedTokens = ["expandAperture", "cropAperture", "adjustApertureWidth", "adjustApertureHeight", "adjustPixelAspectRatio"]
+ doc = '''Indicates the policy to use to resolve an aspect
+ ratio mismatch between the camera aperture and image settings.
+
+ This policy allows a standard render setting to do something
+ reasonable given varying camera inputs.
+
+ The camera aperture aspect ratio is determined by the
+ aperture attributes on the UsdGeomCamera.
+
+ The image aspect ratio is determined by the resolution and
+ pixelAspectRatio attributes in the render settings.
+
+ - "expandAperture": if necessary, expand the aperture to
+ fit the image, exposing additional scene content
+ - "cropAperture": if necessary, crop the aperture to fit
+ the image, cropping scene content
+ - "adjustApertureWidth": if necessary, adjust aperture width
+ to make its aspect ratio match the image
+ - "adjustApertureHeight": if necessary, adjust aperture height
+ to make its aspect ratio match the image
+ - "adjustPixelAspectRatio": compute pixelAspectRatio to
+ make the image exactly cover the aperture; disregards
+ existing attribute value of pixelAspectRatio
+ '''
+ )
+ rel camera (
+ doc = """The _camera_ relationship specifies the primary
+ camera to use in a render. It must target a UsdGeomCamera."""
+ )
+ uniform float4 dataWindowNDC = (0, 0, 1, 1) (
+ doc = """dataWindowNDC specifies the axis-aligned rectangular
+ region in the adjusted aperture window within which the renderer
+ should produce data.
+
+ It is specified as (xmin, ymin, xmax, ymax) in normalized
+ device coordinates, where the range 0 to 1 corresponds to the
+ aperture. (0,0) corresponds to the bottom-left
+ corner and (1,1) corresponds to the upper-right corner.
+
+ Specifying a window outside the unit square will produce
+ overscan data. Specifying a window that does not cover the unit
+ square will produce a cropped render.
+
+ A pixel is included in the rendered result if the pixel
+ center is contained by the data window. This is consistent
+ with standard rules used by polygon rasterization engines.
+ \\ref UsdRenderRasterization
+
+ The data window is expressed in NDC so that cropping and
+ overscan may be resolution independent. In interactive
+ workflows, incremental cropping and resolution adjustment
+ may be intermixed to isolate and examine parts of the scene.
+ In compositing workflows, overscan may be used to support
+ image post-processing kernels, and reduced-resolution proxy
+ renders may be used for faster iteration.
+
+ The dataWindow:ndc coordinate system references the
+ aperture after any adjustments required by
+ aspectRatioConformPolicy.
+ """
+ )
+ uniform bool disableDepthOfField = 0 (
+ doc = """Disable all depth of field by setting F-stop of the targeted
+ camera to infinity."""
+ )
+ uniform bool disableMotionBlur = 0 (
+ doc = """Disable all motion blur by setting the shutter interval
+ of the targeted camera to [0,0] - that is, take only one sample,
+ namely at the current time code."""
+ )
+ uniform bool instantaneousShutter = 0 (
+ doc = """Deprecated - use disableMotionBlur instead. Override
+ the targeted _camera_'s _shutterClose_ to be equal to the
+ value of its _shutterOpen_, to produce a zero-width shutter
+ interval. This gives us a convenient way to disable motion
+ blur."""
+ )
+ uniform float pixelAspectRatio = 1 (
+ doc = """The aspect ratio (width/height) of image pixels..
+ The default ratio 1.0 indicates square pixels."""
+ )
+ uniform int2 resolution = (2048, 1080) (
+ doc = """The image pixel resolution, corresponding to the
+ camera's screen window."""
+ )
+}
+
+class RenderSettings "RenderSettings" (
+ doc = """A UsdRenderSettings prim specifies global settings for
+ a render process, including an enumeration of the RenderProducts
+ that should result, and the UsdGeomImageable purposes that should
+ be rendered. \\ref UsdRenderHowSettingsAffectRendering"""
+)
+{
+ uniform token aspectRatioConformPolicy = "expandAperture" (
+ allowedTokens = ["expandAperture", "cropAperture", "adjustApertureWidth", "adjustApertureHeight", "adjustPixelAspectRatio"]
+ doc = '''Indicates the policy to use to resolve an aspect
+ ratio mismatch between the camera aperture and image settings.
+
+ This policy allows a standard render setting to do something
+ reasonable given varying camera inputs.
+
+ The camera aperture aspect ratio is determined by the
+ aperture attributes on the UsdGeomCamera.
+
+ The image aspect ratio is determined by the resolution and
+ pixelAspectRatio attributes in the render settings.
+
+ - "expandAperture": if necessary, expand the aperture to
+ fit the image, exposing additional scene content
+ - "cropAperture": if necessary, crop the aperture to fit
+ the image, cropping scene content
+ - "adjustApertureWidth": if necessary, adjust aperture width
+ to make its aspect ratio match the image
+ - "adjustApertureHeight": if necessary, adjust aperture height
+ to make its aspect ratio match the image
+ - "adjustPixelAspectRatio": compute pixelAspectRatio to
+ make the image exactly cover the aperture; disregards
+ existing attribute value of pixelAspectRatio
+ '''
+ )
+ rel camera (
+ doc = """The _camera_ relationship specifies the primary
+ camera to use in a render. It must target a UsdGeomCamera."""
+ )
+ uniform float4 dataWindowNDC = (0, 0, 1, 1) (
+ doc = """dataWindowNDC specifies the axis-aligned rectangular
+ region in the adjusted aperture window within which the renderer
+ should produce data.
+
+ It is specified as (xmin, ymin, xmax, ymax) in normalized
+ device coordinates, where the range 0 to 1 corresponds to the
+ aperture. (0,0) corresponds to the bottom-left
+ corner and (1,1) corresponds to the upper-right corner.
+
+ Specifying a window outside the unit square will produce
+ overscan data. Specifying a window that does not cover the unit
+ square will produce a cropped render.
+
+ A pixel is included in the rendered result if the pixel
+ center is contained by the data window. This is consistent
+ with standard rules used by polygon rasterization engines.
+ \\ref UsdRenderRasterization
+
+ The data window is expressed in NDC so that cropping and
+ overscan may be resolution independent. In interactive
+ workflows, incremental cropping and resolution adjustment
+ may be intermixed to isolate and examine parts of the scene.
+ In compositing workflows, overscan may be used to support
+ image post-processing kernels, and reduced-resolution proxy
+ renders may be used for faster iteration.
+
+ The dataWindow:ndc coordinate system references the
+ aperture after any adjustments required by
+ aspectRatioConformPolicy.
+ """
+ )
+ uniform bool disableDepthOfField = 0 (
+ doc = """Disable all depth of field by setting F-stop of the targeted
+ camera to infinity."""
+ )
+ uniform bool disableMotionBlur = 0 (
+ doc = """Disable all motion blur by setting the shutter interval
+ of the targeted camera to [0,0] - that is, take only one sample,
+ namely at the current time code."""
+ )
+ uniform token[] includedPurposes = ["default", "render"] (
+ doc = """The list of UsdGeomImageable _purpose_ values that
+ should be included in the render. Note this cannot be
+ specified per-RenderProduct because it is a statement of
+ which geometry is present."""
+ )
+ uniform bool instantaneousShutter = 0 (
+ doc = """Deprecated - use disableMotionBlur instead. Override
+ the targeted _camera_'s _shutterClose_ to be equal to the
+ value of its _shutterOpen_, to produce a zero-width shutter
+ interval. This gives us a convenient way to disable motion
+ blur."""
+ )
+ uniform token[] materialBindingPurposes = ["full", ""] (
+ allowedTokens = ["full", "preview", ""]
+ doc = '''Ordered list of material purposes to consider when
+ resolving material bindings in the scene. The empty string
+ indicates the "allPurpose" binding.'''
+ )
+ uniform float pixelAspectRatio = 1 (
+ doc = """The aspect ratio (width/height) of image pixels..
+ The default ratio 1.0 indicates square pixels."""
+ )
+ rel products (
+ doc = """The set of RenderProducts the render should produce.
+ This relationship should target UsdRenderProduct prims.
+ If no _products_ are specified, an application should produce
+ an rgb image according to the RenderSettings configuration,
+ to a default display or image name."""
+ )
+ uniform token renderingColorSpace (
+ doc = """Describes a renderer's working (linear) colorSpace where all
+ the renderer/shader math is expected to happen. When no
+ renderingColorSpace is provided, renderer should use its own default."""
+ )
+ uniform int2 resolution = (2048, 1080) (
+ doc = """The image pixel resolution, corresponding to the
+ camera's screen window."""
+ )
+}
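
An illustrative sketch of authoring RenderSettings with the pxr Python bindings; the prim and camera paths are placeholders.

```
from pxr import Usd, UsdRender, Gf, Sdf

stage = Usd.Stage.CreateInMemory()
settings = UsdRender.Settings.Define(stage, "/Render/PrimarySettings")

settings.CreateResolutionAttr(Gf.Vec2i(1920, 1080))
settings.CreatePixelAspectRatioAttr(1.0)
settings.CreateAspectRatioConformPolicyAttr(UsdRender.Tokens.expandAperture)
settings.CreateDataWindowNDCAttr(Gf.Vec4f(0.0, 0.0, 1.0, 1.0))  # full frame, no crop or overscan
settings.CreateCameraRel().SetTargets([Sdf.Path("/World/RenderCam")])
```
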
+
+class RenderVar "RenderVar" (
+ doc = """A UsdRenderVar describes a custom data variable for
+ a render to produce. The prim describes the source of the data, which
+ can be a shader output or an LPE (Light Path Expression), and also
+ allows encoding of (generally renderer-specific) parameters that
+ configure the renderer for computing the variable.
+
+ \\note The name of the RenderVar prim drives the name of the data
+ variable that the renderer will produce.
+
+ \\note In the future, UsdRender may standardize RenderVar representation
+ for well-known variables under the sourceType `intrinsic`,
+ such as _r_, _g_, _b_, _a_, _z_, or _id_.
+ """
+)
+{
+ uniform token dataType = "color3f" (
+ doc = "The type of this channel, as a USD attribute type."
+ )
+ uniform string sourceName = "" (
+ doc = """The renderer should look for an output of this name
+ as the computed value for the RenderVar."""
+ )
+ uniform token sourceType = "raw" (
+ allowedTokens = ["raw", "primvar", "lpe", "intrinsic"]
+ doc = '''Indicates the type of the source.
+
+ - "raw": The name should be passed directly to the
+ renderer. This is the default behavior.
+ - "primvar": This source represents the name of a primvar.
+ Some renderers may use this to ensure that the primvar
+ is provided; other renderers may require that a suitable
+ material network be provided, in which case this is simply
+ an advisory setting.
+ - "lpe": Specifies a Light Path Expression in the
+ [OSL Light Path Expressions language](https://github.com/imageworks/OpenShadingLanguage/wiki/OSL-Light-Path-Expressions) as the source for
+ this RenderVar. Some renderers may use extensions to
+ that syntax, which will necessarily be non-portable.
+ - "intrinsic": This setting is currently unimplemented,
+ but represents a future namespace for UsdRender to provide
+ portable baseline RenderVars, such as camera depth, that
+ may have varying implementations for each renderer.
+ '''
+ )
+}
+
+class RenderProduct "RenderProduct" (
+ doc = """A UsdRenderProduct describes an image or other
+ file-like artifact produced by a render. A RenderProduct
+ combines one or more RenderVars into a file or interactive
+ buffer. It also provides all the controls established in
+ UsdRenderSettingsBase as optional overrides to whatever the
+ owning UsdRenderSettings prim dictates.
+
+ Specific renderers may support additional settings, such
+ as a way to configure compression settings, filetype metadata,
+ and so forth. Such settings can be encoded using
+ renderer-specific API schemas applied to the product prim.
+ """
+)
+{
+ uniform token aspectRatioConformPolicy = "expandAperture" (
+ allowedTokens = ["expandAperture", "cropAperture", "adjustApertureWidth", "adjustApertureHeight", "adjustPixelAspectRatio"]
+ doc = '''Indicates the policy to use to resolve an aspect
+ ratio mismatch between the camera aperture and image settings.
+
+ This policy allows a standard render setting to do something
+ reasonable given varying camera inputs.
+
+ The camera aperture aspect ratio is determined by the
+ aperture attributes on the UsdGeomCamera.
+
+ The image aspect ratio is determined by the resolution and
+ pixelAspectRatio attributes in the render settings.
+
+ - "expandAperture": if necessary, expand the aperture to
+ fit the image, exposing additional scene content
+ - "cropAperture": if necessary, crop the aperture to fit
+ the image, cropping scene content
+ - "adjustApertureWidth": if necessary, adjust aperture width
+ to make its aspect ratio match the image
+ - "adjustApertureHeight": if necessary, adjust aperture height
+ to make its aspect ratio match the image
+ - "adjustPixelAspectRatio": compute pixelAspectRatio to
+ make the image exactly cover the aperture; disregards
+ existing attribute value of pixelAspectRatio
+ '''
+ )
+ rel camera (
+ doc = """The _camera_ relationship specifies the primary
+ camera to use in a render. It must target a UsdGeomCamera."""
+ )
+ uniform float4 dataWindowNDC = (0, 0, 1, 1) (
+ doc = """dataWindowNDC specifies the axis-aligned rectangular
+ region in the adjusted aperture window within which the renderer
+ should produce data.
+
+ It is specified as (xmin, ymin, xmax, ymax) in normalized
+ device coordinates, where the range 0 to 1 corresponds to the
+ aperture. (0,0) corresponds to the bottom-left
+ corner and (1,1) corresponds to the upper-right corner.
+
+ Specifying a window outside the unit square will produce
+ overscan data. Specifying a window that does not cover the unit
+ square will produce a cropped render.
+
+ A pixel is included in the rendered result if the pixel
+ center is contained by the data window. This is consistent
+ with standard rules used by polygon rasterization engines.
+ \\ref UsdRenderRasterization
+
+ The data window is expressed in NDC so that cropping and
+ overscan may be resolution independent. In interactive
+ workflows, incremental cropping and resolution adjustment
+ may be intermixed to isolate and examine parts of the scene.
+ In compositing workflows, overscan may be used to support
+ image post-processing kernels, and reduced-resolution proxy
+ renders may be used for faster iteration.
+
+ The dataWindow:ndc coordinate system references the
+ aperture after any adjustments required by
+ aspectRatioConformPolicy.
+ """
+ )
+ uniform bool disableDepthOfField = 0 (
+ doc = """Disable all depth of field by setting F-stop of the targeted
+ camera to infinity."""
+ )
+ uniform bool disableMotionBlur = 0 (
+ doc = """Disable all motion blur by setting the shutter interval
+ of the targeted camera to [0,0] - that is, take only one sample,
+ namely at the current time code."""
+ )
+ uniform bool instantaneousShutter = 0 (
+ doc = """Deprecated - use disableMotionBlur instead. Override
+ the targeted _camera_'s _shutterClose_ to be equal to the
+ value of its _shutterOpen_, to produce a zero-width shutter
+ interval. This gives us a convenient way to disable motion
+ blur."""
+ )
+ rel orderedVars (
+ doc = """Specifies the RenderVars that should be consumed and
+ combined into the final product. If ordering is relevant to the
+ output driver, then the ordering of targets in this relationship
+ provides the order to use."""
+ )
+ uniform float pixelAspectRatio = 1 (
+ doc = """The aspect ratio (width/height) of image pixels..
+ The default ratio 1.0 indicates square pixels."""
+ )
+ token productName = "" (
+ doc = """Specifies the name that the output/display driver
+ should give the product. This is provided as-authored to the
+ driver, whose responsibility it is to situate the product on a
+ filesystem or other storage, in the desired location."""
+ )
+ uniform token productType = "raster" (
+ doc = '''The type of output to produce.
+ The default, "raster", indicates a 2D image.
+
+ \\note In the future, UsdRender may define additional product
+ types.'''
+ )
+ uniform int2 resolution = (2048, 1080) (
+ doc = """The image pixel resolution, corresponding to the
+ camera's screen window."""
+ )
+}
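
An illustrative sketch wiring a RenderVar into a RenderProduct; the LPE string, paths, and product name are placeholders.

```
from pxr import Usd, UsdRender

stage = Usd.Stage.CreateInMemory()

# A diffuse AOV described by an LPE (the expression is only illustrative).
diffuse = UsdRender.Var.Define(stage, "/Render/Vars/directDiffuse")
diffuse.CreateSourceTypeAttr(UsdRender.Tokens.lpe)
diffuse.CreateSourceNameAttr("C<RD>[<L.>O]")
diffuse.CreateDataTypeAttr("color3f")

# A raster product that consumes the var; the product name is a placeholder.
beauty = UsdRender.Product.Define(stage, "/Render/Products/beauty")
beauty.CreateProductTypeAttr(UsdRender.Tokens.raster)
beauty.CreateProductNameAttr("beauty.0001.exr")
beauty.CreateOrderedVarsRel().SetTargets([diffuse.GetPath()])
```
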
+
+class RenderPass "RenderPass" (
+ apiSchemas = ["CollectionAPI:renderVisibility"]
+ customData = {
+ token[] apiSchemaOverridePropertyNames = ["collection:renderVisibility:includeRoot"]
+ }
+ doc = """A RenderPass prim encapsulates the necessary information
+ to generate multipass renders. It houses properties for generating
+ dependencies and the necessary commands to run to generate renders, as
+ well as visibility controls for the scene. While RenderSettings
+ describes the information needed to generate images from a single
+ invocation of a renderer, RenderPass describes the additional information
+ needed to generate a time varying set of images.
+
+ There are two consumers of RenderPass prims - a runtime executable that
+ generates images from usdRender prims, and pipeline specific code that
+ translates between usdRender prims and the pipeline's resource scheduling
+ software. We'll refer to the latter as 'job submission code'.
+
+ \\anchor usdRender_renderVisibility
+ The objects that are relevant to the render are specified via the
+ renderVisibility collection (UsdCollectionAPI) and can be accessed via
+ GetRenderVisibilityCollectionAPI(). This collection has includeRoot set to
+ true so that all objects participate in the render by default. To render
+ only a specific set of objects, there are two options. One is to modify the
+ collection paths to explicitly exclude objects that don't participate in
+ the render, assuming it is known; the other option is to set includeRoot to
+ false and explicitly include the desired objects. These are complementary
+ approaches that may each be preferable depending on the scenario.
+
+ The name of the prim is used as the pass's name.
+ """
+)
+{
+ uniform bool collection:renderVisibility:includeRoot = 1
+ uniform string[] command (
+ doc = '''The command to run in order to generate
+ renders for this pass. The job submission code can use
+ this to properly send tasks to the job scheduling software
+ that will generate products.
+
+ The command can contain variables that will be substituted
+ appropriately during submission, as seen in the example below
+ with {fileName}.
+
+ For example:
+ command[0] = "prman"
+ command[1] = "-progress"
+ command[2] = "-pixelvariance"
+ command[3] = "-0.15"
+ command[4] = "{fileName}" # the fileName property will be substituted
+ '''
+ )
+ uniform bool denoise:enable = 0 (
+ doc = "When True, this Pass pass should be denoised."
+ )
+ rel denoise:pass (
+ doc = """The The UsdRenderDenoisePass prim from which to
+ source denoise settings.
+ """
+ )
+ uniform asset fileName (
+ doc = """The asset that contains the rendering prims or other
+ information needed to render this pass.
+ """
+ )
+ rel inputPasses (
+ doc = """The set of other Passes that this Pass depends on
+ in order to be constructed properly. For example, a Pass A
+ may generate a texture, which is then used as an input to
+ Pass B.
+
+ By default, usdRender makes some assumptions about the
+ relationship between this prim and the prims listed in inputPasses.
+ Namely, when per-frame tasks are generated from these pass prims,
+ usdRender will assume a one-to-one relationship between tasks
+ that share their frame number. Consider a pass named 'composite'
+ whose _inputPasses_ targets a Pass prim named 'beauty'.
+ By default, each frame for 'composite' will depend on the
+ same frame from 'beauty':
+ beauty.1 -> composite.1
+ beauty.2 -> composite.2
+ etc
+
+ The consumer of this RenderPass graph of inputs will need to resolve
+ the transitive dependencies.
+ """
+ )
+ uniform token passType (
+ doc = """A string used to categorize differently structured
+ or executed types of passes within a customized pipeline.
+
+ For example, when multiple DCCs (e.g. Houdini, Katana, Nuke)
+ each compute and contribute different Products to a final result,
+ it may be clearest and most flexible to create a separate
+ RenderPass for each.
+ """
+ )
+ rel renderSource (
+ doc = """The source prim to render from. If _fileName_ is not present,
+ the source is assumed to be a RenderSettings prim present in the current
+ Usd stage. If fileName is present, the source should be found in the
+ file there. This relationship might target a string attribute on this
+ or another prim that identifies the appropriate object in the external
+ container.
+
+ For example, for a Usd-backed pass, this would point to a RenderSettings
+ prim. Houdini passes would point to a Rop. Nuke passes would point to
+ a write node.
+ """
+ )
+}
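
An illustrative sketch of a RenderPass; the command, asset path, and prim paths are placeholders, and the renderVisibility collection is accessed here through the generic Usd.CollectionAPI rather than the accessor named in the doc string.

```
from pxr import Usd, UsdRender, Sdf

stage = Usd.Stage.CreateInMemory()
rp = UsdRender.Pass.Define(stage, "/Render/Passes/beauty")

rp.CreatePassTypeAttr("prman")
rp.CreateCommandAttr(["prman", "-progress", "{fileName}"])       # {fileName} substituted at submission
rp.CreateFileNameAttr(Sdf.AssetPath("shots/sh010/render.usda"))  # placeholder asset
rp.CreateRenderSourceRel().SetTargets([Sdf.Path("/Render/PrimarySettings")])

# Hide a prim from this pass through the renderVisibility collection.
vis = Usd.CollectionAPI(rp.GetPrim(), "renderVisibility")
vis.CreateExcludesRel().AddTarget("/World/DebugGeo")
```
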
+
+class RenderDenoisePass "RenderDenoisePass" (
+ doc = """A RenderDenoisePass generates renders via a denoising process.
+ This may be the same renderer that a pipeline uses for UsdRender,
+ or may be a separate one. Notably, a RenderDenoisePass requires
+ another Pass to be present for it to operate. The denoising process
+ itself is not generative, and requires image inputs to operate.
+
+ As denoising integration varies so widely across pipelines, all
+ implementation details are left to pipeline-specific prims
+ that inherit from RenderDenoisePass.
+ """
+)
+{
+}
+
+# Portions of this file auto-generated by usdGenSchema.
+# Edits will survive regeneration except for comments and
+# changes to types with autoGenerated=true.
+{
+ "Plugins": [
+ {
+ "Info": {
+ "SdfMetadata": {
+ "renderSettingsPrimPath": {
+ "appliesTo": "layers",
+ "default": "",
+ "type": "string"
+ }
+ },
+ "Types": {
+ "UsdRenderDenoisePass": {
+ "alias": {
+ "UsdSchemaBase": "RenderDenoisePass"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdTyped"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdRenderPass": {
+ "alias": {
+ "UsdSchemaBase": "RenderPass"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdTyped"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdRenderProduct": {
+ "alias": {
+ "UsdSchemaBase": "RenderProduct"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdRenderSettingsBase"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdRenderSettings": {
+ "alias": {
+ "UsdSchemaBase": "RenderSettings"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdRenderSettingsBase"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdRenderSettingsBase": {
+ "alias": {
+ "UsdSchemaBase": "RenderSettingsBase"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdTyped"
+ ],
+ "schemaKind": "abstractTyped"
+ },
+ "UsdRenderVar": {
+ "alias": {
+ "UsdSchemaBase": "RenderVar"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdTyped"
+ ],
+ "schemaKind": "concreteTyped"
+ }
+ }
+ },
+ "LibraryPath": "",
+ "Name": "usdRender",
+ "ResourcePath": "resources",
+ "Root": "..",
+ "Type": "library"
+ }
+ ]
+}
+#usda 1.0
+(
+ "This file describes the USD Render schemata for code generation."
+ subLayers = [
+ @usd/schema.usda@
+ ]
+)
+
+over "GLOBAL" (
+ customData = {
+ string libraryName = "usdRender"
+ string libraryPath = "pxr/usd/usdRender"
+ dictionary libraryTokens = {
+ dictionary renderSettingsPrimPath = {
+ string doc = """Stage-level metadata that encodes the
+ path to UsdRenderSettingsPrim to use for rendering."""
+ }
+ dictionary renderVisibility = {
+ string doc = """
+ This token represents the collection name to use
+ with UsdCollectionAPI to represent renderVisibility
+ of a RenderPass prim.
+ """
+ }
+ }
+ }
+)
+{
+}
+
+class "RenderSettingsBase" (
+ inherits = </Typed>
+ doc = """Abstract base class that defines render settings that
+ can be specified on either a RenderSettings prim or a RenderProduct
+ prim."""
+ customData = {
+ string className = "SettingsBase"
+ }
+)
+{
+ rel camera (
+ doc = """The _camera_ relationship specifies the primary
+ camera to use in a render. It must target a UsdGeomCamera."""
+ )
+ uniform int2 resolution = (2048, 1080) (
+ doc = """The image pixel resolution, corresponding to the
+ camera's screen window."""
+ )
+ uniform float pixelAspectRatio = 1.0 (
+ doc = """The aspect ratio (width/height) of image pixels..
+ The default ratio 1.0 indicates square pixels."""
+ )
+ uniform token aspectRatioConformPolicy = "expandAperture" (
+ doc = """Indicates the policy to use to resolve an aspect
+ ratio mismatch between the camera aperture and image settings.
+
+ This policy allows a standard render setting to do something
+ reasonable given varying camera inputs.
+
+ The camera aperture aspect ratio is determined by the
+ aperture attributes on the UsdGeomCamera.
+
+ The image aspect ratio is determined by the resolution and
+ pixelAspectRatio attributes in the render settings.
+
+ - "expandAperture": if necessary, expand the aperture to
+ fit the image, exposing additional scene content
+ - "cropAperture": if necessary, crop the aperture to fit
+ the image, cropping scene content
+ - "adjustApertureWidth": if necessary, adjust aperture width
+ to make its aspect ratio match the image
+ - "adjustApertureHeight": if necessary, adjust aperture height
+ to make its aspect ratio match the image
+ - "adjustPixelAspectRatio": compute pixelAspectRatio to
+ make the image exactly cover the aperture; disregards
+ existing attribute value of pixelAspectRatio
+ """
+ allowedTokens = ["expandAperture", "cropAperture", "adjustApertureWidth", "adjustApertureHeight", "adjustPixelAspectRatio"]
+ )
+ uniform float4 dataWindowNDC = (0.0, 0.0, 1.0, 1.0) (
+ doc = """dataWindowNDC specifies the axis-aligned rectangular
+ region in the adjusted aperture window within which the renderer
+ should produce data.
+
+ It is specified as (xmin, ymin, xmax, ymax) in normalized
+ device coordinates, where the range 0 to 1 corresponds to the
+ aperture. (0,0) corresponds to the bottom-left
+ corner and (1,1) corresponds to the upper-right corner.
+
+ Specifying a window outside the unit square will produce
+ overscan data. Specifying a window that does not cover the unit
+ square will produce a cropped render.
+
+ A pixel is included in the rendered result if the pixel
+ center is contained by the data window. This is consistent
+ with standard rules used by polygon rasterization engines.
+ \\ref UsdRenderRasterization
+
+ The data window is expressed in NDC so that cropping and
+ overscan may be resolution independent. In interactive
+ workflows, incremental cropping and resolution adjustment
+ may be intermixed to isolate and examine parts of the scene.
+ In compositing workflows, overscan may be used to support
+ image post-processing kernels, and reduced-resolution proxy
+ renders may be used for faster iteration.
+
+ The dataWindow:ndc coordinate system references the
+ aperture after any adjustments required by
+ aspectRatioConformPolicy.
+ """
+ )
+ uniform bool instantaneousShutter = false (
+ doc = """Deprecated - use disableMotionBlur instead. Override
+ the targeted _camera_'s _shutterClose_ to be equal to the
+ value of its _shutterOpen_, to produce a zero-width shutter
+ interval. This gives us a convenient way to disable motion
+ blur."""
+ )
+ uniform bool disableMotionBlur = false (
+ doc = """Disable all motion blur by setting the shutter interval
+ of the targeted camera to [0,0] - that is, take only one sample,
+ namely at the current time code."""
+ )
+ uniform bool disableDepthOfField = false (
+ doc = """Disable all depth of field by setting F-stop of the targeted
+ camera to infinity."""
+ )
+}
+
+class RenderSettings "RenderSettings" (
+ inherits = </RenderSettingsBase>
+ doc = """A UsdRenderSettings prim specifies global settings for
+ a render process, including an enumeration of the RenderProducts
+ that should result, and the UsdGeomImageable purposes that should
+ be rendered. \\ref UsdRenderHowSettingsAffectRendering"""
+ customData = {
+ string className = "Settings"
+ }
+)
+{
+ rel products (
+ doc = """The set of RenderProducts the render should produce.
+ This relationship should target UsdRenderProduct prims.
+ If no _products_ are specified, an application should produce
+ an rgb image according to the RenderSettings configuration,
+ to a default display or image name."""
+ )
+ uniform token[] includedPurposes = ["default", "render"] (
+ doc = """The list of UsdGeomImageable _purpose_ values that
+ should be included in the render. Note this cannot be
+ specified per-RenderProduct because it is a statement of
+ which geometry is present."""
+ )
+ uniform token[] materialBindingPurposes = ["full", ""] (
+ allowedTokens = ["full", "preview", ""]
+ doc = """Ordered list of material purposes to consider when
+ resolving material bindings in the scene. The empty string
+ indicates the "allPurpose" binding."""
+ )
+ uniform token renderingColorSpace (
+ doc = """Describes a renderer's working (linear) colorSpace where all
+ the renderer/shader math is expected to happen. When no
+ renderingColorSpace is provided, renderer should use its own default."""
+ )
+}
+
+class RenderVar "RenderVar" (
+ inherits = </Typed>
+ doc = """A UsdRenderVar describes a custom data variable for
+ a render to produce. The prim describes the source of the data, which
+ can be a shader output or an LPE (Light Path Expression), and also
+ allows encoding of (generally renderer-specific) parameters that
+ configure the renderer for computing the variable.
+
+ \\note The name of the RenderVar prim drives the name of the data
+ variable that the renderer will produce.
+
+ \\note In the future, UsdRender may standardize RenderVar representation
+ for well-known variables under the sourceType `intrinsic`,
+ such as _r_, _g_, _b_, _a_, _z_, or _id_.
+ """
+ customData = {
+ string className = "Var"
+ }
+) {
+ uniform token dataType = "color3f" (
+ doc = """The type of this channel, as a USD attribute type."""
+ )
+ uniform string sourceName = "" (
+ doc = """The renderer should look for an output of this name
+ as the computed value for the RenderVar."""
+ )
+ uniform token sourceType = "raw" (
+ doc = """Indicates the type of the source.
+
+ - "raw": The name should be passed directly to the
+ renderer. This is the default behavior.
+ - "primvar": This source represents the name of a primvar.
+ Some renderers may use this to ensure that the primvar
+ is provided; other renderers may require that a suitable
+ material network be provided, in which case this is simply
+ an advisory setting.
+ - "lpe": Specifies a Light Path Expression in the
+ [OSL Light Path Expressions language](https://github.com/imageworks/OpenShadingLanguage/wiki/OSL-Light-Path-Expressions) as the source for
+ this RenderVar. Some renderers may use extensions to
+ that syntax, which will necessarily be non-portable.
+ - "intrinsic": This setting is currently unimplemented,
+ but represents a future namespace for UsdRender to provide
+ portable baseline RenderVars, such as camera depth, that
+ may have varying implementations for each renderer.
+ """
+ allowedTokens = ["raw", "primvar", "lpe", "intrinsic"]
+ )
+}
+
+class RenderProduct "RenderProduct" (
+ inherits = </RenderSettingsBase>
+ doc = """A UsdRenderProduct describes an image or other
+ file-like artifact produced by a render. A RenderProduct
+ combines one or more RenderVars into a file or interactive
+ buffer. It also provides all the controls established in
+ UsdRenderSettingsBase as optional overrides to whatever the
+ owning UsdRenderSettings prim dictates.
+
+ Specific renderers may support additional settings, such
+ as a way to configure compression settings, filetype metadata,
+ and so forth. Such settings can be encoded using
+ renderer-specific API schemas applied to the product prim.
+ """
+ customData = {
+ string className = "Product"
+ dictionary schemaTokens = {
+ dictionary raster = {
+ string doc = """RenderProduct productType value that
+ indicates a 2D raster image of pixels."""
+ }
+ }
+ }
+) {
+ uniform token productType = "raster" (
+ doc = """The type of output to produce.
+ The default, "raster", indicates a 2D image.
+
+ \\note In the future, UsdRender may define additional product
+ types."""
+ )
+ token productName = "" (
+ doc = """Specifies the name that the output/display driver
+ should give the product. This is provided as-authored to the
+ driver, whose responsibility it is to situate the product on a
+ filesystem or other storage, in the desired location."""
+ )
+ rel orderedVars (
+ doc = """Specifies the RenderVars that should be consumed and
+ combined into the final product. If ordering is relevant to the
+ output driver, then the ordering of targets in this relationship
+ provides the order to use."""
+ )
+}
+
+
+class RenderPass "RenderPass" (
+ inherits = </Typed>
+ doc = """A RenderPass prim encapsulates the necessary information
+ to generate multipass renders. It houses properties for generating
+ dependencies and the necessary commands to run to generate renders, as
+ well as visibility controls for the scene. While RenderSettings
+ describes the information needed to generate images from a single
+ invocation of a renderer, RenderPass describes the additional information
+ needed to generate a time varying set of images.
+
+ There are two consumers of RenderPass prims - a runtime executable that
+ generates images from usdRender prims, and pipeline specific code that
+ translates between usdRender prims and the pipeline's resource scheduling
+ software. We'll refer to the latter as 'job submission code'.
+
+ \\anchor usdRender_renderVisibility
+ The objects that are relevant to the render are specified via the
+ renderVisibility collection (UsdCollectionAPI) and can be accessed via
+ GetRenderVisibilityCollectionAPI(). This collection has includeRoot set to
+ true so that all objects participate in the render by default. To render
+ only a specific set of objects, there are two options. One is to modify the
+ collection paths to explicitly exclude objects that don't participate in
+ the render, assuming it is known; the other option is to set includeRoot to
+ false and explicitly include the desired objects. These are complementary
+ approaches that may each be preferable depending on the scenario.
+
+ The name of the prim is used as the pass's name.
+ """
+ customData = {
+ string className = "Pass"
+ string extraIncludes = """
+#include "pxr/usd/usd/collectionAPI.h" """
+ }
+ prepend apiSchemas = ["CollectionAPI:renderVisibility"]
+) {
+ uniform token passType (
+ doc = """A string used to categorize differently structured
+ or executed types of passes within a customized pipeline.
+
+ For example, when multiple DCCs (e.g. Houdini, Katana, Nuke)
+ each compute and contribute different Products to a final result,
+ it may be clearest and most flexible to create a separate
+ RenderPass for each.
+ """
+ )
+
+ uniform string[] command (
+ doc = """The command to run in order to generate
+ renders for this pass. The job submission code can use
+ this to properly send tasks to the job scheduling software
+ that will generate products.
+
+ The command can contain variables that will be substituted
+ appropriately during submission, as seen in the example below
+ with {fileName}.
+
+ For example:
+ command[0] = "prman"
+ command[1] = "-progress"
+ command[2] = "-pixelvariance"
+ command[3] = "-0.15"
+ command[4] = "{fileName}" # the fileName property will be substituted
+ """
+ )
+
+ rel renderSource (
+ doc = """The source prim to render from. If _fileName_ is not present,
+ the source is assumed to be a RenderSettings prim present in the current
+ Usd stage. If fileName is present, the source should be found in the
+ file there. This relationship might target a string attribute on this
+ or another prim that identifies the appropriate object in the external
+ container.
+
+ For example, for a Usd-backed pass, this would point to a RenderSettings
+ prim. Houdini passes would point to a Rop. Nuke passes would point to
+ a write node.
+ """
+ )
+
+ rel inputPasses (
+ doc = """The set of other Passes that this Pass depends on
+ in order to be constructed properly. For example, a Pass A
+ may generate a texture, which is then used as an input to
+ Pass B.
+
+ By default, usdRender makes some assumptions about the
+ relationship between this prim and the prims listed in inputPasses.
+ Namely, when per-frame tasks are generated from these pass prims,
+ usdRender will assume a one-to-one relationship between tasks
+ that share their frame number. Consider a pass named 'composite'
+ whose _inputPasses_ targets a Pass prim named 'beauty'.
+ By default, each frame for 'composite' will depend on the
+ same frame from 'beauty':
+ beauty.1 -> composite.1
+ beauty.2 -> composite.2
+ etc
+
+ The consumer of this RenderPass graph of inputs will need to resolve
+ the transitive dependencies.
+ """
+ )
+
+ uniform asset fileName (
+ doc = """The asset that contains the rendering prims or other
+ information needed to render this pass.
+ """
+ )
+
+ uniform bool denoise:enable = false (
+ doc = """When True, this Pass pass should be denoised."""
+ )
+
+ rel denoise:pass (
+ doc = """The The UsdRenderDenoisePass prim from which to
+ source denoise settings.
+ """
+ )
+
+ uniform bool collection:renderVisibility:includeRoot = 1 (
+ customData = {
+ bool apiSchemaOverride = true
+ }
+ )
+}
+
+
+class RenderDenoisePass "RenderDenoisePass" (
+ inherits = </Typed>
+ doc = """A RenderDenoisePass generates renders via a denoising process.
+ This may be the same renderer that a pipeline uses for UsdRender,
+ or may be a separate one. Notably, a RenderDenoisePass requires
+ another Pass to be present for it to operate. The denoising process
+ itself is not generative, and requires image inputs to operate.
+
+ As denoising integration varies so widely across pipelines, all
+ implementation details are left to pipeline-specific prims
+ that inherit from RenderDenoisePass.
+ """
+ customData = {
+ string className = "DenoisePass"
+ }
+) {
+}
+#usda 1.0
+(
+ "WARNING: THIS FILE IS GENERATED BY usdGenSchema. DO NOT EDIT."
+)
+
+class NodeGraph "NodeGraph" (
+ doc = '''A node-graph is a container for shading nodes, as well as other
+ node-graphs. It has a public input interface and provides a list of public
+ outputs.
+
+ Node Graph Interfaces
+
+ One of the most important functions of a node-graph is to host the "interface"
+ with which clients of already-built shading networks will interact. Please
+ see "Interface Inputs" for a detailed
+ explanation of what the interface provides, and how to construct and
+ use it, to effectively share/instance shader networks.
+
+ Node Graph Outputs
+
+ These behave like outputs on a shader and are typically connected to an
+ output on a shader inside the node-graph.
+ '''
+)
+{
+}
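
An illustrative sketch of a NodeGraph with one interface input and one public output driven by a nested UsdUVTexture shader; paths and port choices are placeholders.

```
from pxr import Usd, UsdShade, Sdf

stage = Usd.Stage.CreateInMemory()
graph = UsdShade.NodeGraph.Define(stage, "/Materials/WoodTextures")

# Public interface of the node-graph.
uv_in = graph.CreateInput("uv", Sdf.ValueTypeNames.Float2)
color_out = graph.CreateOutput("diffuseColor", Sdf.ValueTypeNames.Color3f)

# Nested shader: reads the interface input, drives the public output.
tex = UsdShade.Shader.Define(stage, "/Materials/WoodTextures/Diffuse")
tex.CreateIdAttr("UsdUVTexture")
tex.CreateInput("st", Sdf.ValueTypeNames.Float2).ConnectToSource(uv_in)
color_out.ConnectToSource(tex.CreateOutput("rgb", Sdf.ValueTypeNames.Float3))
```
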
+
+class Material "Material" (
+ doc = """A Material provides a container into which multiple \"render contexts\"
+ can add data that defines a \"shading material\" for a renderer. Typically
+ this consists of one or more UsdShadeOutput properties connected to outputs
+ of nested Shader prims - though a context/client is free to add
+ any data that is suitable. We strongly advise that all contexts
+ adopt the convention that all properties be prefixed with a namespace
+ that identifies the context e.g. \"token outputs:ri:surface.connect =
+ resolved Material that will be used to shade the gprim (exceptions,
+ of course, for gprims that possess UsdGeomSubsets, as each subset can be
+ shaded by a different Material). A gprim and each of its ancestor
+ prims can possess, through the MaterialBindingAPI, both a
+ direct binding to a Material, and any number of
+ collection-based bindings to Materials; each binding can be generic
+ or declared for a particular purpose, and given a specific binding
+ strength. It is the process of \"material resolution\" (see
+ UsdShadeMaterialBindingAPI_MaterialResolution) that examines all of
+ these bindings, and selects the one Material that best matches the
+ client's needs.
+
+ The intent of purpose is that each gprim should be able to resolve a
+ Material for any given purpose, which implies it can have differently bound
+ materials for different purposes. There are two special values of
+ purpose defined in UsdShade, although the API fully supports
+ specifying arbitrary values for it, for the sake of extensibility:
+
+ UsdShadeTokens->full: to be used when the purpose of the
+ render is entirely to visualize the truest representation of a scene,
+ considering all lighting and material information, at highest fidelity.
+
+ UsdShadeTokens->preview: to be used when the render is in
+ service of a goal other than a high fidelity \"full\" render (such as scene
+ manipulation, modeling, or realtime playback). Latency and speed are
+ generally of greater concern for preview renders, therefore preview
+ materials are generally designed to be \"lighterweight\" compared to full
+ materials.
+ A binding can also have no specific purpose at all, in which
+ case, it is considered to be the fallback or all-purpose binding (denoted
+ by the empty-valued token UsdShadeTokens->allPurpose).
+
+ The purpose of a material binding is encoded in the name of the
+ binding relationship.
+
+ In the case of a direct binding, the allPurpose binding is
+ represented by the relationship named material:binding.
+ Special-purpose direct bindings are represented by relationships named
+ material:binding:purpose. A direct binding relationship
+ must have a single target path that points to a UsdShadeMaterial.
+
+ In the case of a collection-based binding, the allPurpose binding is
+ represented by a relationship named
+ material:binding:collection:bindingName, where
+ bindingName establishes an identity for the binding that is unique
+ on the prim. Attempting to establish two collection bindings of the same
+ name on the same prim will result in the first binding simply being
+ overridden. A special-purpose collection-based binding is represented by a
+ relationship named material:binding:collection:purpose:bindingName.
+ A collection-based binding relationship must have exactly two targets, one of
+ which should be a collection-path (see
+ UsdCollectionAPI::GetCollectionPath()) and the other should point to a
+ UsdShadeMaterial. In the future, we may allow a single collection
+ binding to target multiple collections, if we can establish a reasonable
+ round-tripping pattern for applications that only allow a single collection
+ to be associated with each Material.
+
+
+
+ Note: Both bindingName and purpose must be
+ non-namespaced tokens. This allows us to know the role of a binding
+ relationship simply from the number of tokens in it.
+
+ Two tokens: the fallback, \"all purpose\", direct binding,
+ material:binding
+
+ Three tokens: a purpose-restricted, direct, fallback binding,
+ e.g. material:binding:preview
+
+ Four tokens: an all-purpose, collection-based binding, e.g.
+ material:binding:collection:metalBits
+
+ Five tokens: a purpose-restricted, collection-based binding,
+ e.g. material:binding:collection:full:metalBits
+
+
+ A binding-strength value is used to specify whether a binding
+ authored on a prim should be weaker or stronger than bindings that appear
+ lower in namespace. We encode the binding strength as token-valued
+ metadata 'bindMaterialAs' for future flexibility, even though for
+ now, there are only two possible values:
+ UsdShadeTokens->weakerThanDescendants and
+ UsdShadeTokens->strongerThanDescendants. When binding-strength is
+ not authored (i.e. empty) on a binding-relationship, the default behavior
+ matches UsdShadeTokens->weakerThanDescendants.
+
+ \\note If a material binding relationship is a built-in property defined as
+ part of a typed prim's schema, a fallback value should not be provided for
+ it. This is because the \"material resolution\" algorithm only considers
+ authored properties.
+ """
+)
+{
+}
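+
+ As an illustration of the naming rules above (the mesh, collection, and material names in this sketch are made up), the four binding forms might be authored as:
+
+ ```
+ def Mesh "Gprim" (
+     prepend apiSchemas = ["MaterialBindingAPI"]
+ )
+ {
+     # Two tokens: fallback, all-purpose direct binding.
+     rel material:binding = </Materials/Steel>
+     # Three tokens: purpose-restricted direct binding.
+     rel material:binding:preview = </Materials/SteelPreview>
+     # Four tokens: all-purpose, collection-based binding.
+     rel material:binding:collection:metalBits = [
+         </Gprim.collection:metalBits>,
+         </Materials/Steel>
+     ]
+     # Five tokens: purpose-restricted, collection-based binding.
+     rel material:binding:collection:full:metalBits = [
+         </Gprim.collection:metalBits>,
+         </Materials/SteelFull>
+     ]
+ }
+ ```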
+
+class "CoordSysAPI" (
+ doc = '''UsdShadeCoordSysAPI provides a way to designate, name,
+ and discover coordinate systems.
+
+ Coordinate systems are implicitly established by UsdGeomXformable
+ prims, using their local space. That coordinate system may be
+ bound (i.e., named) from another prim. The binding is encoded
+ as a single-target relationship.
+ Coordinate system bindings apply to descendants of the prim
+ where the binding is expressed, but names may be re-bound by
+ descendant prims.
+
+ CoordSysAPI is a multi-apply API schema, where instance names
+ signify the named coordinate systems. The instance names are
+ used with the "coordSys:" namespace to determine the binding
+ to the UsdGeomXformable prim.
+
+ Named coordinate systems are useful in shading (and other) workflows.
+ An example is projection paint, which projects a texture
+ from a certain view (the paint coordinate system), encoded as
+ (e.g.) "rel coordSys:paint:binding". Using the paint coordinate frame
+ avoids the need to assign a UV set to the object, and can be a
+ concise way to project paint across a collection of objects with
+ a single shared paint coordinate system.
+ '''
+)
+{
+ rel coordSys:__INSTANCE_NAME__:binding (
+ displayName = "Bound Coordinate System"
+ doc = "Prim binding expressing the appropriate coordinate systems."
+ )
+}
+
+# Portions of this file auto-generated by usdGenSchema.
+# Edits will survive regeneration except for comments and
+# changes to types with autoGenerated=true.
+{
+ "Plugins": [
+ {
+ "Info": {
+ "SdfMetadata": {
+ "bindMaterialAs": {
+ "appliesTo": [
+ "relationships"
+ ],
+ "displayGroup": "Shading",
+ "documentation": "Metadata authored on collection-based material binding relationship to indicate the strength of the binding relative to bindings authored on descendant prims.",
+ "type": "token"
+ },
+ "connectability": {
+ "appliesTo": [
+ "attributes"
+ ],
+ "default": "full",
+ "displayGroup": "Shading",
+ "documentation": "Metadata authored on UsdShadeInput's to specify what they can be connected to. Can be either \"full\" or \"interfaceOnly\". \"full\" implies that the input can be connected to any other input or output. \"interfaceOnly\" implies that the input can only connect to a NodeGraph Input (which represents an interface override, not a render-time dataflow connection), or another Input whose connectability is also \"interfaceOnly\".",
+ "type": "token"
+ },
+ "outputName": {
+ "appliesTo": [
+ "relationships"
+ ],
+ "displayGroup": "deprecated",
+ "type": "token"
+ },
+ "renderType": {
+ "appliesTo": [
+ "properties"
+ ],
+ "displayGroup": "Rendering",
+ "type": "token"
+ },
+ "sdrMetadata": {
+ "appliesTo": [
+ "prims",
+ "attributes"
+ ],
+ "displayGroup": "Shading",
+ "type": "dictionary"
+ }
+ },
+ "Types": {
+ "UsdShadeConnectableAPI": {
+ "alias": {
+ "UsdSchemaBase": "ConnectableAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "nonAppliedAPI"
+ },
+ "UsdShadeCoordSysAPI": {
+ "alias": {
+ "UsdSchemaBase": "CoordSysAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "multipleApplyAPI"
+ },
+ "UsdShadeMaterial": {
+ "alias": {
+ "UsdSchemaBase": "Material"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdShadeNodeGraph"
+ ],
+ "providesUsdShadeConnectableAPIBehavior": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdShadeMaterialBindingAPI": {
+ "alias": {
+ "UsdSchemaBase": "MaterialBindingAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "singleApplyAPI"
+ },
+ "UsdShadeNodeDefAPI": {
+ "alias": {
+ "UsdSchemaBase": "NodeDefAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "singleApplyAPI"
+ },
+ "UsdShadeNodeGraph": {
+ "alias": {
+ "UsdSchemaBase": "NodeGraph"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdTyped"
+ ],
+ "providesUsdShadeConnectableAPIBehavior": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdShadeShader": {
+ "alias": {
+ "UsdSchemaBase": "Shader"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdTyped"
+ ],
+ "providesUsdShadeConnectableAPIBehavior": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdShadeShaderDefParserPlugin": {
+ "bases": [
+ "NdrParserPlugin"
+ ],
+ "displayName": "USD-based shader definition parser plugin"
+ }
+ }
+ },
+ "LibraryPath": "",
+ "Name": "usdShade",
+ "ResourcePath": "resources",
+ "Root": "..",
+ "Type": "library"
+ }
+ ]
+}
+#usda 1.0
+(
+ "This file describes the USD Shader schemata for code generation."
+ subLayers = [
+ @usd/schema.usda@
+ ]
+)
+
+over "GLOBAL" (
+ customData = {
+ string libraryName = "usdShade"
+ string libraryPath = "pxr/usd/usdShade"
+ dictionary libraryTokens = {
+ dictionary materialBind = {
+ string doc = """The name of the GeomSubset family used to
+ identify face subsets defined for the purpose of binding
+ materials to facesets.
+ """
+ }
+ dictionary full = {
+ string doc= """Possible value for 'connectability' metadata on
+ a UsdShadeInput. When connectability of an input is set to
+ "full", it implies that it can be connected to any input or
+ output.
+ """
+ }
+ dictionary interfaceOnly = {
+ string doc= """Possible value for 'connectability' metadata on
+ a UsdShadeInput. It implies that the input can only connect to
+ a NodeGraph Input (which represents an interface override, not
+ a render-time dataflow connection), or another Input whose
+ connectability is also 'interfaceOnly'.
+ """
+ }
+ dictionary outputs = {
+ string value = "outputs:"
+ string doc = """The prefix on shading attributes
+ denoting an output.
+ """
+ }
+ dictionary inputs = {
+ string value = "inputs:"
+ string doc = """The prefix on shading attributes
+ denoting an input.
+ """
+ }
+ }
+ }
+)
+{
+}
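+
+ As a small sketch of how these tokens appear in scene description (the material, shader, and input names are illustrative), the "inputs:"/"outputs:" prefixes mark shading attributes, and 'connectability' is authored as attribute metadata:
+
+ ```
+ def Material "ExampleMaterial"
+ {
+     # Interface input exposed on the container, forwarded to the shader.
+     color3f inputs:baseColor = (0.18, 0.18, 0.18)
+     token outputs:surface.connect = </ExampleMaterial/Surface.outputs:surface>
+
+     def Shader "Surface"
+     {
+         uniform token info:id = "UsdPreviewSurface"
+         color3f inputs:diffuseColor.connect = </ExampleMaterial.inputs:baseColor>
+         int inputs:useSpecularWorkflow = 0 (
+             connectability = "interfaceOnly"
+         )
+         token outputs:surface
+     }
+ }
+ ```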
+
+class NodeGraph "NodeGraph" (
+ inherits = </Typed>
+ doc = """A node-graph is a container for shading nodes, as well as other
+ node-graphs. It has a public input interface and provides a list of public
+ outputs.
+
+ Node Graph Interfaces
+
+ One of the most important functions of a node-graph is to host the "interface"
+ with which clients of already-built shading networks will interact. Please
+ see \\ref UsdShadeNodeGraph_Interfaces "Interface Inputs" for a detailed
+ explanation of what the interface provides, and how to construct and
+ use it, to effectively share/instance shader networks.
+
+ Node Graph Outputs
+
+ These behave like outputs on a shader and are typically connected to an
+ output on a shader inside the node-graph.
+ """
+ customData = {
+ dictionary extraPlugInfo = {
+ bool providesUsdShadeConnectableAPIBehavior = 1
+ }
+ string extraIncludes = '''
+#include
+#include "pxr/usd/usd/editTarget.h"
+#include "pxr/usd/usd/relationship.h"
+#include "pxr/usd/usdShade/input.h"
+#include "pxr/usd/usdShade/output.h"
+#include "pxr/usd/usdShade/shader.h"
+#include "pxr/usd/usdShade/connectableAPIBehavior.h"'''
+ }
+) {
+
+}
+
+
+class Material "Material" (
+ inherits = </NodeGraph>
+ doc = """A Material provides a container into which multiple "render contexts"
+ can add data that defines a "shading material" for a renderer. Typically
+ this consists of one or more UsdShadeOutput properties connected to outputs
+ of nested Shader prims - though a context/client is free to add
+ any data that is suitable. We strongly advise that all contexts
+ adopt the convention that all properties be prefixed with a namespace
+ that identifies the context e.g. "token outputs:ri:surface.connect =
+ surface output
+ terminal on a UsdShadeMaterial. It is used to define the
+ terminal UsdShadeShader describing the surface of a
+ UsdShadeMaterial.
+ """
+ }
+ dictionary displacement = {
+ string doc = """Describes the displacement output
+ terminal on a UsdShadeMaterial. It is used to define the
+ terminal UsdShadeShader describing the displacement of a
+ UsdShadeMaterial.
+ """
+ }
+ dictionary volume = {
+ string doc = """Describes the volume output
+ terminal on a UsdShadeMaterial. It is used to define the
+ terminal UsdShadeShader describing the volume of a
+ UsdShadeMaterial.
+ """
+ }
+ dictionary universalRenderContext = {
+ string value = ""
+ string doc = """Possible value for the "renderContext" parameter
+ in \\ref UsdShadeMaterial_Outputs API. Represents the universal
+ renderContext. An output with a universal renderContext is
+ applicable to all possible rendering contexts.
+ """
+ }
+ }
+ }
+)
+{
+ token outputs:surface (
+ doc = """Represents the universal "surface" output terminal of a
+ material."""
+ displayGroup = "Outputs"
+ customData = {
+ string apiName = "surface"
+ }
+ )
+ token outputs:displacement (
+ doc = """Represents the universal "displacement" output terminal of a
+ material."""
+ displayGroup = "Outputs"
+ customData = {
+ string apiName = "displacement"
+ }
+ )
+ token outputs:volume (
+ doc = """Represents the universal "volume" output terminal of a
+ material."""
+ displayGroup = "Outputs"
+ customData = {
+ string apiName = "volume"
+ }
+ )
+}
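+
+ For example, a Material can carry both a universal terminal and a render-context-specific one; the shader prim names and the "ri" render context in this sketch are illustrative:
+
+ ```
+ def Material "ContextExample"
+ {
+     # Universal terminal, consulted by any render context.
+     token outputs:surface.connect = </ContextExample/Preview.outputs:surface>
+     # Renderer-specific terminal for the "ri" render context.
+     token outputs:ri:surface.connect = </ContextExample/RiSurface.outputs:out>
+ }
+ ```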
+
+class Shader "Shader" (
+ inherits = </Typed>
+ prepend apiSchemas = ["NodeDefAPI"]
+ doc = """Base class for all USD shaders. Shaders are the building blocks
+ of shading networks. While UsdShadeShader objects are not target specific,
+ each renderer or application target may derive its own renderer-specific
+ shader object types from this base, if needed.
+
+ Objects of this class generally represent a single shading object, whether
+ it exists in the target renderer or not. For example, a texture, a fractal,
+ or a mix node.
+
+ The UsdShadeNodeDefAPI provides attributes to uniquely identify the
+ type of this node. The id resolution into a renderable shader target
+ is deferred to the consuming application.
+
+ The purpose of representing them in Usd is two-fold:
+ \\li To represent, via "connections" the topology of the shading network
+ that must be reconstructed in the renderer. Facilities for authoring and
+ manipulating connections are encapsulated in the API schema
+ UsdShadeConnectableAPI.
+ \\li To present a (partial or full) interface of typed input parameters
+ whose values can be set and overridden in Usd, to be provided later at
+ render-time as parameter values to the actual render shader objects. Shader
+ input parameters are encapsulated in the property schema UsdShadeInput.
+ """
+ customData = {
+ dictionary extraPlugInfo = {
+ bool providesUsdShadeConnectableAPIBehavior = 1
+ }
+ string extraIncludes = '''
+#include "pxr/usd/usdShade/input.h"
+#include "pxr/usd/usdShade/output.h"
+#include "pxr/usd/usdShade/tokens.h"
+#include "pxr/usd/ndr/declare.h"
+#include "pxr/usd/sdr/shaderNode.h"'''
+ }
+) {
+}
+
+class "NodeDefAPI"
+(
+ inherits = </APISchemaBase>
+ doc = """UsdShadeNodeDefAPI is an API schema that provides attributes
+ for a prim to select a corresponding Shader Node Definition ("Sdr Node"),
+ as well as to look up a runtime entry for that shader node in the
+ form of an SdrShaderNode.
+
+ UsdShadeNodeDefAPI is intended to be a pre-applied API schema for any
+ prim type that wants to refer to the SdrRegistry for further implementation
+ details about the behavior of that prim. The primary use in UsdShade
+ itself is as UsdShadeShader, which is a basis for material shading networks
+ (UsdShadeMaterial), but this is intended to be used in other domains
+ that also use the Sdr node mechanism.
+
+ This schema provides properties that allow a prim to identify an external
+ node definition, either by a direct identifier key into the SdrRegistry
+ (info:id), an asset to be parsed by a suitable NdrParserPlugin
+ (info:sourceAsset), or an inline source code that must also be parsed
+ (info:sourceCode); as well as a selector attribute to determine which
+ specifier is active (info:implementationSource).
+ """
+ customData = {
+ token apiSchemaType = "singleApply"
+ string extraIncludes = """
+#include "pxr/usd/ndr/declare.h"
+#include "pxr/usd/sdr/shaderNode.h"
+ """
+ dictionary schemaTokens = {
+ dictionary universalSourceType = {
+ string value = ""
+ string doc = """Possible value for the "sourceType" parameter
+ in \\ref UsdShadeNodeDefAPI_ImplementationSource API. Represents
+ the universal or fallback source type.
+ """
+ }
+ dictionary sdrMetadata = {
+ string doc = """Dictionary valued metadata key authored on
+ Shader prims with implementationSource value of sourceAsset or
+ sourceCode to pass along metadata to the shader parser or
+ compiler. It is also used to author metadata on shader
+ properties in a UsdShade-based shader definition file.
+ """
+ }
+ dictionary subIdentifier = {
+ string doc = """This identifier is used in conjunction with a
+ specific source asset to indicate a particular definition within
+ the source asset, if the source asset specifies more than one
+ shader node definition.
+ """
+ }
+ }
+ }
+)
+{
+ uniform token info:implementationSource = "id" (
+ allowedTokens = ["id", "sourceAsset", "sourceCode"]
+ doc = """Specifies the attribute that should be consulted to get the
+ shader's implementation or its source code.
+
+ * If set to "id", the "info:id" attribute's value is used to
+ determine the shader source from the shader registry.
+ * If set to "sourceAsset", the resolved value of the "info:sourceAsset"
+ attribute corresponding to the desired implementation (or source-type)
+ is used to locate the shader source. A source asset file may also
+ specify multiple shader definitions, so there is an optional attribute
+ "info:sourceAsset:subIdentifier" whose value should be used to indicate
+ a particular shader definition from a source asset file.
+ * If set to "sourceCode", the value of "info:sourceCode" attribute
+ corresponding to the desired implementation (or source type) is used as
+ the shader source.
+ """
+ customData = {
+ string apiName = "implementationSource"
+ }
+ )
+
+ uniform token info:id (
+ doc = """The id is an identifier for the type or purpose of the
+ shader. E.g.: Texture or FractalFloat.
+ The use of this id will depend on the render context: some will turn it
+ into an actual shader path, some will use it to generate shader source
+ code dynamically.
+
+ \\sa SetShaderId()
+ """
+ customData = {
+ string apiName = "id"
+ }
+ )
+}
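+
+ A sketch of the three implementation-source modes described above; the "glslfx" source type, the asset path, and the prim names are illustrative:
+
+ ```
+ def Shader "ById"
+ {
+     # Default mode: look the node up in the registry by identifier.
+     uniform token info:implementationSource = "id"
+     uniform token info:id = "UsdPreviewSurface"
+ }
+
+ def Shader "ByAsset"
+ {
+     # Parse the node definition out of an external source asset.
+     uniform token info:implementationSource = "sourceAsset"
+     uniform asset info:glslfx:sourceAsset = @./myShaders.glslfx@
+ }
+
+ def Shader "ByCode"
+ {
+     # Inline source code, handed to a matching parser plugin.
+     uniform token info:implementationSource = "sourceCode"
+     uniform string info:glslfx:sourceCode = "// inline glslfx source"
+ }
+ ```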
+
+class "ConnectableAPI"
+(
+ inherits = </APISchemaBase>
+ doc = """UsdShadeConnectableAPI is an API schema that provides a common
+ interface for creating outputs and making connections between shading
+ parameters and outputs. The interface is common to all UsdShade schemas
+ that support Inputs and Outputs, which currently includes UsdShadeShader,
+ UsdShadeNodeGraph, and UsdShadeMaterial.
+
+ One can construct a UsdShadeConnectableAPI directly from a UsdPrim, or
+ from objects of any of the schema classes listed above. If it seems
+ onerous to need to construct a secondary schema object to interact with
+ Inputs and Outputs, keep in mind that any function whose purpose is either
+ to walk material/shader networks via their connections, or to create such
+ networks, can typically be written entirely in terms of
+ UsdShadeConnectableAPI objects, without needing to care what the underlying
+ prim type is.
+
+ Additionally, the most common UsdShadeConnectableAPI behaviors
+ (creating Inputs and Outputs, and making connections) are wrapped as
+ convenience methods on the prim schema classes (creation) and
+ UsdShadeInput and UsdShadeOutput.
+ """
+ customData = {
+ token apiSchemaType = "nonApplied"
+ string extraIncludes = '''
+#include "pxr/usd/usd/typed.h"
+#include "pxr/usd/usdShade/input.h"
+#include "pxr/usd/usdShade/output.h"
+#include "pxr/usd/usdShade/tokens.h"
+#include "pxr/usd/usdShade/types.h"'''
+ }
+)
+{
+}
+
+class "MaterialBindingAPI"
+(
+ inherits = </APISchemaBase>
+ doc = """UsdShadeMaterialBindingAPI is an API schema that provides an
+ interface for binding materials to prims or collections of prims
+ (represented by UsdCollectionAPI objects).
+
+ In the USD shading model, each renderable gprim computes a single
+ resolved Material that will be used to shade the gprim (exceptions,
+ of course, for gprims that possess UsdGeomSubsets, as each subset can be
+ shaded by a different Material). A gprim and each of its ancestor
+ prims can possess, through the MaterialBindingAPI, both a
+ direct binding to a Material, and any number of
+ collection-based bindings to Materials; each binding can be generic
+ or declared for a particular purpose, and given a specific binding
+ strength. It is the process of "material resolution" (see
+ \\ref UsdShadeMaterialBindingAPI_MaterialResolution) that examines all of
+ these bindings, and selects the one Material that best matches the
+ client's needs.
+
+ The intent of purpose is that each gprim should be able to resolve a
+ Material for any given purpose, which implies it can have differently bound
+ materials for different purposes. There are two special values of
+ purpose defined in UsdShade, although the API fully supports
+ specifying arbitrary values for it, for the sake of extensibility:
+
+ UsdShadeTokens->full: to be used when the purpose of the
+ render is entirely to visualize the truest representation of a scene,
+ considering all lighting and material information, at highest fidelity.
+
+ UsdShadeTokens->preview: to be used when the render is in
+ service of a goal other than a high fidelity "full" render (such as scene
+ manipulation, modeling, or realtime playback). Latency and speed are
+ generally of greater concern for preview renders, therefore preview
+ materials are generally designed to be "lighterweight" compared to full
+ materials.
+ A binding can also have no specific purpose at all, in which
+ case, it is considered to be the fallback or all-purpose binding (denoted
+ by the empty-valued token UsdShadeTokens->allPurpose).
+
+ The purpose of a material binding is encoded in the name of the
+ binding relationship.
+
+ In the case of a direct binding, the allPurpose binding is
+ represented by the relationship named material:binding.
+ Special-purpose direct bindings are represented by relationships named
+ material:binding:purpose. A direct binding relationship
+ must have a single target path that points to a UsdShadeMaterial.
+
+ In the case of a collection-based binding, the allPurpose binding is
+ represented by a relationship named
+ material:binding:collection:bindingName, where
+ bindingName establishes an identity for the binding that is unique
+ on the prim. Attempting to establish two collection bindings of the same
+ name on the same prim will result in the first binding simply being
+ overridden. A special-purpose collection-based binding is represented by a
+ relationship named material:binding:collection:purpose:bindingName.
+ A collection-based binding relationship must have exactly two targets, one of
+ which should be a collection-path (see
+ \\ref UsdCollectionAPI::GetCollectionPath()) and the other should point to a
+ UsdShadeMaterial. In the future, we may allow a single collection
+ binding to target multiple collections, if we can establish a reasonable
+ round-tripping pattern for applications that only allow a single collection
+ to be associated with each Material.
+
+
+
+ Note: Both bindingName and purpose must be
+ non-namespaced tokens. This allows us to know the role of a binding
+ relationship simply from the number of tokens in it.
+
+ Two tokens: the fallback, "all purpose", direct binding,
+ material:binding
+
+ Three tokens: a purpose-restricted, direct, fallback binding,
+ e.g. material:binding:preview
+
+ Four tokens: an all-purpose, collection-based binding, e.g.
+ material:binding:collection:metalBits
+
+ Five tokens: a purpose-restricted, collection-based binding,
+ e.g. material:binding:collection:full:metalBits
+
+
+ A binding-strength value is used to specify whether a binding
+ authored on a prim should be weaker or stronger than bindings that appear
+ lower in namespace. We encode the binding strength as token-valued
+ metadata 'bindMaterialAs' for future flexibility, even though for
+ now, there are only two possible values:
+ UsdShadeTokens->weakerThanDescendants and
+ UsdShadeTokens->strongerThanDescendants. When binding-strength is
+ not authored (i.e. empty) on a binding-relationship, the default behavior
+ matches UsdShadeTokens->weakerThanDescendants.
+
+ \\note If a material binding relationship is a built-in property defined as
+ part of a typed prim's schema, a fallback value should not be provided for
+ it. This is because the "material resolution" algorithm only considers
+ authored properties.
+ """
+ customData = {
+ string extraIncludes = """
+#include "pxr/usd/usd/collectionAPI.h"
+#include "pxr/usd/usdGeom/subset.h"
+#include "pxr/usd/usdShade/material.h"
+#include """
+ dictionary schemaTokens = {
+ dictionary materialBinding = {
+ string value = "material:binding"
+ string doc = """
+ The relationship name on non-shading prims to
+ denote a binding to a UsdShadeMaterial.
+ """
+ }
+ dictionary materialBindingCollection = {
+ string value = "material:binding:collection"
+ string doc = """
+ The relationship name on non-shading prims to
+ denote a collection-based binding to a UsdShadeMaterial.
+ """
+ }
+ dictionary allPurpose = {
+ string value = ""
+ string doc = """Possible value for the 'materialPurpose' parameter
+ in the various methods available in UsdShadeMaterialBindingAPI.
+ Its value is empty and its purpose is to represent a general
+ purpose material-binding that applies in the absence of a
+ specific-purpose binding.
+ """
+ }
+ dictionary preview = {
+ string doc = """Possible value for the 'materialPurpose'
+ parameter in UsdShadeMaterialBindingAPI, to be used when the
+ render is in service of a goal other than a high fidelity "full"
+ render (such as scene manipulation, modeling, or realtime
+ playback). Latency and speed are generally of greater concern
+ for preview renders, therefore preview materials are generally
+ designed to be "lighterweight" compared to full materials.
+ """
+ }
+
+ dictionary full = {
+ string doc= """Possible value for the 'materialPurpose'
+ parameter in UsdShadeMaterialBindingAPI, to be used when the
+ purpose of the render is entirely about visualizing the truest
+ representation of a scene, considering all lighting and material
+ information, at highest fidelity.
+ """
+ }
+ dictionary bindMaterialAs = {
+ string doc = """Token valued metadata key authored on a material
+ binding relationship to indicate the strength of the binding
+ relative to bindings authored on descendants.
+ """
+ }
+ dictionary weakerThanDescendants = {
+ string doc = """Possible value for 'bindMaterialAs' metadata on the
+ collection-based material binding relationship. Indicates
+ that the binding represented by the relationship is weaker than
+ any bindings authored on the descendants."""
+ }
+ dictionary strongerThanDescendants = {
+ string doc = """Possible value for 'bindMaterialAs' metadata on the
+ collection-based material binding relationship. Indicates
+ that the binding represented by the relationship is stronger than
+ any bindings authored on the descendants."""
+ }
+ dictionary fallbackStrength = {
+ string doc = """Sentinel value to be used for 'bindMaterialAs'
+ metadata's default value. Clients should pass this in for the
+ 'bindingStrength' argument to UsdShadeMaterialBindingAPI::Bind(),
+ if they want to author the default value (weakerThanDescendants)
+ sparsely. The value "fallbackStrength" never gets authored
+ into scene description."""
+ }
+ }
+ }
+)
+{
+
+}
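+
+ A short sketch of the strength metadata described above (the prim and material paths are illustrative): a binding authored high in namespace can win over bindings on descendants by authoring 'bindMaterialAs':
+
+ ```
+ over "Set" (
+     prepend apiSchemas = ["MaterialBindingAPI"]
+ )
+ {
+     rel material:binding = </Materials/SetDressing> (
+         bindMaterialAs = "strongerThanDescendants"
+     )
+ }
+ ```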
+
+class "CoordSysAPI"
+(
+ inherits = </APISchemaBase>
+ doc = """UsdShadeCoordSysAPI provides a way to designate, name,
+ and discover coordinate systems.
+
+ Coordinate systems are implicitly established by UsdGeomXformable
+ prims, using their local space. That coordinate system may be
+ bound (i.e., named) from another prim. The binding is encoded
+ as a single-target relationship.
+ Coordinate system bindings apply to descendants of the prim
+ where the binding is expressed, but names may be re-bound by
+ descendant prims.
+
+ CoordSysAPI is a multi-apply API schema, where instance names
+ signify the named coordinate systems. The instance names are
+ used with the "coordSys:" namespace to determine the binding
+ to the UsdGeomXformable prim.
+
+ Named coordinate systems are useful in shading (and other) workflows.
+ An example is projection paint, which projects a texture
+ from a certain view (the paint coordinate system), encoded as
+ (e.g.) "rel coordSys:paint:binding". Using the paint coordinate frame
+ avoids the need to assign a UV set to the object, and can be a
+ concise way to project paint across a collection of objects with
+ a single shared paint coordinate system.
+ """
+ customData = {
+ token apiSchemaType = "multipleApply"
+ token propertyNamespacePrefix = "coordSys"
+ string extraIncludes = '''
+#include "pxr/usd/usdGeom/xformable.h"'''
+ }
+)
+{
+ rel binding (
+ displayName = "Bound Coordinate System"
+ doc = "Prim binding expressing the appropriate coordinate systems."
+ )
+}
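+
+ For example, applying the schema with an instance name of "paint" (the prim names here are illustrative) and binding it to an Xformable might look like:
+
+ ```
+ def Xform "World" (
+     prepend apiSchemas = ["CoordSysAPI:paint"]
+ )
+ {
+     # Descendants of /World can now resolve the "paint" coordinate system.
+     rel coordSys:paint:binding = </World/ProjectionCamera>
+
+     def Xform "ProjectionCamera"
+     {
+     }
+ }
+ ```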
+{
+ "Plugins": [
+ {
+ "Info": {
+ "Types": {
+ "UsdShadersDiscoveryPlugin" : {
+ "bases": ["NdrDiscoveryPlugin"],
+ "displayName": "USD-based shader definition discovery plugin."
+ }
+ },
+ # Allow usdShaders/shaders to be accessible by hio/glslfx.cpp's
+ # ShaderResourceRegistry
+ "ShaderResources": "shaders"
+ },
+ "LibraryPath": "",
+ "Name": "usdShaders",
+ "ResourcePath": "resources",
+ "Root": "..",
+ "Type": "library"
+ }
+ ]
+}
+-- glslfx version 0.1
+
+//
+// Copyright 2018 Pixar
+//
+// Licensed under the Apache License, Version 2.0 (the "Apache License")
+// with the following modification; you may not use this file except in
+// compliance with the Apache License and the following modification to it:
+// Section 6. Trademarks. is deleted and replaced with:
+//
+// 6. Trademarks. This License does not grant permission to use the trade
+// names, trademarks, service marks, or product names of the Licensor
+// and its affiliates, except as required to comply with Section 4(c) of
+// the License and to reproduce the content of the NOTICE file.
+//
+// You may obtain a copy of the Apache License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the Apache License with the above modification is
+// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the Apache License for the specific
+// language governing permissions and limitations under the Apache License.
+//
+
+#import $TOOLS/hdSt/shaders/surfaceHelpers.glslfx
+
+-- configuration
+{
+ "techniques": {
+ "default": {
+ "displacementShader": {
+ "source": [ "Preview.Displacement" ]
+ },
+ "surfaceShader": {
+ "source": [ "Preview.LightStructures",
+ "SurfaceHelpers.Lighting",
+ "Preview.Lighting",
+ "Preview.LightIntegration",
+ "SurfaceHelpers.TangentSpace",
+ "Preview.NormalMapping",
+ "Preview.Surface" ]
+ }
+ }
+ }
+}
+
+
+-- glsl Preview.Displacement
+
+vec4
+displacementShader(int index, vec4 Peye, vec3 Neye, vec4 patchCoord)
+{
+ // Calculate scalar displacement.
+ float texDisplacement = HdGetScalar_displacement(index);
+ return Peye + vec4(Neye*texDisplacement, 0);
+}
+
+
+-- glsl Preview.Surface
+
+vec4
+surfaceShader(vec4 Peye, vec3 Neye, vec4 color, vec4 patchCoord)
+{
+ float clearcoatAmount = HdGetScalar_clearcoat();
+ float clearcoatRoughness = HdGetScalar_clearcoatRoughness();
+ vec3 diffuseColor = HdGet_diffuseColor().xyz;
+ vec3 emissiveColor = HdGet_emissiveColor().xyz;
+ float ior = HdGetScalar_ior();
+ float metallic = HdGetScalar_metallic();
+ float occlusion = HdGetScalar_occlusion();
+ float opacity = HdGetScalar_opacity();
+ float opacityThreshold = HdGetScalar_opacityThreshold();
+ float roughness = HdGetScalar_roughness();
+ vec3 specularColor = HdGet_specularColor().xyz;
+ bool useSpecularWorkflow = (HdGetScalar_useSpecularWorkflow() == 1);
+
+ vec3 clearcoatColor = vec3(1.0);
+ float specularAmount = 1.0;
+
+#ifdef HD_MATERIAL_TAG_MASKED
+ if (opacity < opacityThreshold) {
+ discard;
+ return vec4(1.0);
+ }
+ opacity = 1.0;
+#endif
+
+ // Selection highlighting.
+ vec4 colorAndOpacity = vec4(diffuseColor, opacity);
+ diffuseColor = ApplyColorOverrides(colorAndOpacity).rgb;
+
+ // Pre-multiply diffuse color by opacity if not done so already
+#ifndef diffuseColor_IS_PREMULTIPLIED
+ diffuseColor *= opacity;
+#endif
+
+ // Evaluate all lights.
+ vec3 c = evaluateLights(
+ emissiveColor,
+ diffuseColor,
+ useSpecularWorkflow,
+ ior,
+ metallic,
+ specularAmount,
+ specularColor,
+ roughness,
+ clearcoatAmount,
+ clearcoatColor,
+ clearcoatRoughness,
+ occlusion,
+ Peye,
+ perturbFragmentNormal(Peye.xyz, Neye));
+
+ return vec4(c, colorAndOpacity.a);
+}
+
+
+-- glsl Preview.LightStructures
+
+struct LightingContributions
+{
+ vec3 diffuse;
+ vec3 specular;
+};
+
+
+-- glsl Preview.Lighting
+
+#define EPSILON 0.001
+
+
+float
+SchlickFresnel(float EdotH)
+{
+ return pow(max(0.0, 1.0 - EdotH), 5.0);
+}
+
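+ // GGX / Trowbridge-Reitz microfacet normal distribution:
+ //   D = alpha^2 / (PI * ((N.H)^2 * (alpha^2 - 1) + 1)^2), with alpha = roughness^2.
+ // EPSILON keeps the numerator non-zero for perfectly smooth surfaces.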
+float
+NormalDistribution(float specularRoughness, float NdotH)
+{
+ float alpha = specularRoughness * specularRoughness;
+ float alpha2 = alpha * alpha;
+ float NdotH2 = NdotH * NdotH;
+ float DDenom = (NdotH2 * (alpha2 - 1.0)) + 1.0;
+ DDenom *= DDenom;
+ DDenom *= PI;
+ float D = (alpha2 + EPSILON) / DDenom;
+ return D;
+}
+
+float
+Geometric(
+ float specularRoughness,
+ float NdotL,
+ float NdotE,
+ float NdotH,
+ float EdotH)
+{
+ float alpha = specularRoughness * specularRoughness;
+ float k = alpha * 0.5;
+ float G = NdotE / (NdotE * (1.0 - k) + k);
+ G *= NdotL / (NdotL * (1.0 - k) + k);
+ return G;
+}
+
+vec3
+evaluateDirectDiffuse()
+{
+ return vec3(1.0 / PI);
+}
+
+vec3
+evaluateDirectSpecular(
+ vec3 specularColorF0,
+ vec3 specularColorF90,
+ float specularRoughness,
+ float fresnel,
+ float NdotL,
+ float NdotE,
+ float NdotH,
+ float EdotH)
+{
+ vec3 F = mix(specularColorF0, specularColorF90, fresnel);
+ float D = NormalDistribution(specularRoughness, NdotH);
+ float G = Geometric(specularRoughness, NdotL, NdotE, NdotH, EdotH);
+ vec3 RNum = F * G * D;
+ float RDenom = 4.0f * NdotL * NdotE + EPSILON;
+ return RNum / RDenom;
+}
+
+LightingContributions
+evaluateLight(
+ vec3 diffuseColor,
+ bool useSpecularWorkflow,
+ float ior,
+ float metallic,
+ float specularAmount,
+ vec3 specularColor,
+ float specularRoughness,
+ float clearcoatAmount,
+ vec3 clearcoatColor,
+ float clearcoatRoughness,
+ float occlusion,
+ float NdotL,
+ float NdotE,
+ float NdotH,
+ float EdotH,
+ vec3 lightDiffuseIrradiance,
+ vec3 lightSpecularIrradiance)
+{
+ specularRoughness = max(0.001, specularRoughness);
+ clearcoatRoughness = max(0.001, clearcoatRoughness);
+
+ float fresnel = SchlickFresnel(EdotH);
+
+ // Evaluate diffuse
+ vec3 d = diffuseColor * evaluateDirectDiffuse();
+
+ // Evaluate specular first lobe
+ vec3 s1 = vec3(0.0);
+ const float R = (1.0 - ior) / (1.0 + ior);
+ if (specularAmount > 0.0) {
+ vec3 F0 = specularColor;
+ vec3 F90 = vec3(1.0);
+
+ if (!useSpecularWorkflow) {
+ vec3 specColor = mix(vec3(1.0), diffuseColor, metallic);
+ F0 = mix(R * R * specColor, specColor, metallic);
+ F90 = specColor;
+
+ // For metallic workflows, pure metals have no diffuse
+ d *= 1.0 - metallic;
+ }
+
+ s1 = specularAmount * evaluateDirectSpecular(
+ F0, // Specular color 0
+ F90, // Specular color 90
+ specularRoughness, // Roughness
+ fresnel, // Fresnel
+ NdotL, NdotE, NdotH, EdotH); // Dot products needed for lights
+
+ // Adjust the diffuse so glazing angles have less diffuse
+ d *= (1.0 - mix(F0, F90, fresnel));
+ }
+
+ // Evaluate clearcoat
+ vec3 s2 = vec3(0.0);
+ if (clearcoatAmount > 0.0) {
+ s2 = clearcoatAmount * evaluateDirectSpecular(
+ R * R * clearcoatColor, // Clearcoat color 0
+ clearcoatColor, // Clearcoat color 90
+ clearcoatRoughness, // Roughness
+ fresnel, // Fresnel
+ NdotL, NdotE, NdotH, EdotH); // Dot products needed for lights
+ }
+
+ LightingContributions lightingContrib;
+
+ lightingContrib.diffuse =
+ occlusion * NdotL * d * lightDiffuseIrradiance;
+
+ lightingContrib.specular =
+ occlusion * NdotL * (s1 + s2) * lightSpecularIrradiance;
+
+ return lightingContrib;
+}
+
+
+-- glsl Preview.LightIntegration
+
+mat4 GetDomeLightTransform(mat4 worldToLightTransform)
+{
+ // transform from view space to light space
+ mat4 worldToViewInverse = GetWorldToViewInverseMatrix();
+ return worldToLightTransform * worldToViewInverse;
+}
+
+LightingContributions
+evaluateIndirectLighting(
+ vec3 diffuseColor,
+ vec3 specularColor,
+ vec3 Neye,
+ vec3 Reye,
+ float NdotE,
+ float EdotH,
+ float ior,
+ float metallic,
+ float occlusion,
+ float roughness,
+ bool useSpecularWorkflow,
+ float clearcoatAmount,
+ vec3 clearcoatColor,
+ float clearcoatRoughness,
+ mat4 worldToLightTransform)
+{
+ LightingContributions indirect;
+ indirect.diffuse = vec3(0.0);
+ indirect.specular = vec3(0.0);
+
+#ifdef HD_HAS_domeLightIrradiance
+ vec3 F0 = specularColor;
+ vec3 F90 = vec3(1.0);
+ vec3 d = diffuseColor;
+ const float R = (1.0 - ior) / (1.0 + ior);
+ if (!useSpecularWorkflow) {
+ vec3 specColor = mix(vec3(1.0), diffuseColor, metallic);
+ F0 = mix(R * R * specColor, specColor, metallic);
+ F90 = specColor;
+
+ // For metallic workflows, pure metals have no diffuse
+ d *= 1.0 - metallic;
+ }
+ // Adjust the diffuse so glazing angles have less diffuse
+ float fresnel = SchlickFresnel(EdotH);
+ vec3 F = mix(F0, F90, fresnel);
+ d *= (1.0 - F);
+
+ mat4 transformationMatrix = GetDomeLightTransform(worldToLightTransform);
+
+ // Diffuse Component
+ vec3 dir = normalize((transformationMatrix * vec4(Neye,0.0)).xyz);
+ vec2 coord = ProjectToLatLong(dir);
+ vec3 diffuse = HdGet_domeLightIrradiance(coord).rgb;
+
+ // Specular Component
+ const float MAX_REFLECTION_LOD =
+ textureQueryLevels(HdGetSampler_domeLightPrefilter());
+ float lod = roughness * MAX_REFLECTION_LOD;
+ vec3 Rdir = normalize((transformationMatrix * vec4(Reye,0.0)).xyz);
+ vec2 Rcoord = ProjectToLatLong(Rdir);
+ vec3 prefilter = HdTextureLod_domeLightPrefilter(Rcoord, lod).rgb;
+
+ vec2 brdf = HdGet_domeLightBRDF(vec2(NdotE, roughness)).rg;
+
+ vec3 specular = prefilter * (F * brdf.x + brdf.y);
+
+ // Clearcoat Component
+ vec3 clearcoat = vec3(0.0);
+ if (clearcoatAmount > 0.0) {
+ const vec3 clearcoatF = clearcoatAmount * mix(
+ R * R * clearcoatColor, // Clearcoat F0
+ clearcoatColor, // Clearcoat F90
+ fresnel);
+ lod = clearcoatRoughness * MAX_REFLECTION_LOD;
+ prefilter = HdTextureLod_domeLightPrefilter(Rcoord, lod).rgb;
+ clearcoat = prefilter * (clearcoatF * brdf.x + brdf.y);
+ }
+
+ // Indirect Lighting
+ indirect.diffuse = (d * diffuse) * occlusion;
+ indirect.specular = (specular + clearcoat) * occlusion;
+
+#endif
+
+ return indirect;
+}
+
+vec3
+evaluateLights(
+ vec3 emissiveColor,
+ vec3 diffuseColor,
+ bool useSpecularWorkflow,
+ float ior,
+ float metallic,
+ float specularAmount,
+ vec3 specularColor,
+ float specularRoughness,
+ float clearcoatAmount,
+ vec3 clearcoatColor,
+ float clearcoatRoughness,
+ float occlusion,
+ vec4 Peye,
+ vec3 Neye)
+{
+ vec3 n = Neye;
+ vec3 e = normalize(-Peye.xyz);
+ float NdotE = max(0.0, dot(n, e));
+
+ vec3 Reye = reflect(-e, n);
+
+ vec3 directLight = vec3(0.0);
+ vec3 indirectLight = vec3(0.0);
+
+#if NUM_LIGHTS > 0
+ for (int i = 0; i < NUM_LIGHTS; ++i) {
+ LightSource light = GetLightSource(i);
+
+ // Calculate necessary vector information for lighting
+ vec4 Plight = light.isIndirectLight
+ ? vec4(0,0,0,1)
+ : light.position;
+ vec3 l = (Plight.w == 0.0)
+ ? normalize(Plight.xyz)
+ : normalize(Plight - Peye).xyz;
+ vec3 h = normalize(e + l);
+ float NdotL = max(0.0, dot(n, l));
+ float NdotH = max(0.0, dot(n, h));
+ float EdotH = max(0.0, dot(e, h));
+
+ // Calculate light intensity
+ float atten = lightDistanceAttenuation(Peye, i);
+ float spot = lightSpotAttenuation(l, i);
+
+ // Calculate the shadow factor
+ float shadow = 1.0;
+ #if USE_SHADOWS
+ shadow = light.hasShadow ?
+ shadowing(/*lightIndex=*/i, Peye) : 1.0;
+ #endif
+
+ float intensity = atten * spot * shadow;
+
+ vec3 lightDiffuseIrradiance = intensity * light.diffuse.rgb;
+ vec3 lightSpecularIrradiance = intensity * light.specular.rgb;
+
+ LightingContributions lightingContrib = evaluateLight(
+ diffuseColor,
+ useSpecularWorkflow,
+ ior,
+ metallic,
+ specularAmount,
+ specularColor,
+ specularRoughness,
+ clearcoatAmount,
+ clearcoatColor,
+ clearcoatRoughness,
+ occlusion,
+ NdotL,
+ NdotE,
+ NdotH,
+ EdotH,
+ lightDiffuseIrradiance,
+ lightSpecularIrradiance);
+
+ // calculate the indirect light (DomeLight)
+ if (light.isIndirectLight) {
+
+ LightingContributions indirectLightContrib =
+ evaluateIndirectLighting(diffuseColor, specularColor,
+ Neye, Reye, NdotE, EdotH, ior, metallic, occlusion,
+ specularRoughness, useSpecularWorkflow,
+ clearcoatAmount, clearcoatColor, clearcoatRoughness,
+ light.worldToLightTransform);
+ indirectLight = (indirectLightContrib.diffuse * light.diffuse.rgb
+ + indirectLightContrib.specular * light.specular.rgb);
+ }
+ // all other light sources contribute to the direct lighting
+ else {
+ directLight += (lightingContrib.diffuse + lightingContrib.specular);
+ }
+ }
+#endif
+
+ return (emissiveColor + directLight + indirectLight);
+}
+
+
+-- glsl Preview.NormalMapping
+
+vec3
+perturbFragmentNormal(vec3 P, vec3 N)
+{
+#ifdef HD_HAS_COORD_normal
+ vec3 Nt = normalize(HdGet_normal().xyz);
+ vec2 st = HdGetCoord_normal().xy;
+ return PerturbNormal(P, N, st, Nt);
+#endif
+ return N;
+}
+
+
+-- glslfx version 0.1
+
+//
+// Copyright 2018 Pixar
+//
+// Licensed under the Apache License, Version 2.0 (the "Apache License")
+// with the following modification; you may not use this file except in
+// compliance with the Apache License and the following modification to it:
+// Section 6. Trademarks. is deleted and replaced with:
+//
+// 6. Trademarks. This License does not grant permission to use the trade
+// names, trademarks, service marks, or product names of the Licensor
+// and its affiliates, except as required to comply with Section 4(c) of
+// the License and to reproduce the content of the NOTICE file.
+//
+// You may obtain a copy of the Apache License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the Apache License with the above modification is
+// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the Apache License for the specific
+// language governing permissions and limitations under the Apache License.
+//
+
+-- configuration
+{
+ "techniques": {
+ "default": {
+ }
+ }
+}
+
+-- This file is intentionally empty.
+#usda 1.0
+
+def Shader "UsdPreviewSurface" (
+ doc = "Preview surface specification"
+)
+{
+ uniform token info:id = "UsdPreviewSurface"
+ uniform token info:implementationSource = "sourceAsset"
+
+ # XXX: This doesn't resolve if it's defined as a search path.
+ # Might require special resolver configuration, to include the resources
+ # directory.
+ uniform asset info:glslfx:sourceAsset = @./previewSurface.glslfx@
+
+ # Outputs
+ token outputs:surface (
+ sdrMetadata = {
+ string renderType = "terminal surface"
+ }
+ )
+ token outputs:displacement (
+ sdrMetadata = {
+ string renderType = "terminal displacement"
+ }
+ )
+
+ # Inputs
+ color3f inputs:diffuseColor = (0.18, 0.18, 0.18) (
+ doc = """Parameter used as diffuseColor when using the specular
+ workflow, when using metallic workflow this is interpreted
+ as albedo."""
+ )
+
+ color3f inputs:emissiveColor = (0.0, 0.0, 0.0) (
+ doc = """Emissive component."""
+ )
+
+ int inputs:useSpecularWorkflow = 0 (
+ connectability = "interfaceOnly"
+ doc = """This node can fundamentally operate in two modes:
+ Specular workflow where you provide a texture/value to the
+ "specularColor" input. Or, Metallic workflow where you
+ provide a texture/value to the "metallic" input."""
+ )
+
+ color3f inputs:specularColor = (0.0, 0.0, 0.0) (
+ doc = """Used only in the specular workflow.
+ Specular color to be used.
+ This is the color at 0 incidence. Edge color is assumed white.
+ Transition between the two colors according to Schlick fresnel
+ approximation."""
+ )
+
+ float inputs:metallic = 0.0 (
+ doc = """Used only in the metalness workflow.
+ 1 for metallic surfaces and 0 for non-metallic.
+ - If metallic is 1, then both F0 (reflectivity at 0 degree
+ incidence) and edge F90 reflectivity will simply be the Albedo.
+ - If metallic is 0, then Albedo is ignored in the calculation of F0
+ and F90; F0 is derived from ior via ( (1-ior)/(1+ior) )^2 and F90 is
+ white.
+ In between, we interpolate."""
+ )
+
+ float inputs:roughness = 0.5 (
+ doc = """Roughness for the specular lobe. The value ranges from 0 to 1,
+ which goes from a perfectly specular surface at 0.0 to maximum roughness
+ of the specular lobe. This value is usually squared before use with a
+ GGX or Beckmann lobe."""
+ )
+
+ float inputs:clearcoat = 0.0 (
+ doc = """Second specular lobe amount. The color is white."""
+ )
+
+ float inputs:clearcoatRoughness = 0.01 (
+ doc = """Roughness for the second specular lobe."""
+ )
+
+ float inputs:opacity = 1.0 (
+ doc = """Opacity of the material."""
+ )
+
+ float inputs:opacityThreshold = 0.0 (
+ connectability = "interfaceOnly"
+ doc = """Threshold used to determine opacity values that will be
+ considered fully transparent. A value of 0.0 indicates that no masking
+ is applied to the opacity input, while a value greater than 0.0 indicates
+ that rendering of the surface is limited to the areas where the opacity
+ is greater or equal to that value. Note that when opacityThreshold is
+ greater than zero, the opacity values less than the opacityThreshold will
+ not be rendered, and the opacity values greater than or equal to the
+ opacityThreshold will be fully visible."""
+ )
+
+ float inputs:ior = 1.5 (
+ doc = """Index of Refraction to be used for translucent objects."""
+ )
+
+ normal3f inputs:normal = (0.0, 0.0, 1.0) (
+ doc = """Expects normal in tangent space [(-1,-1,-1), (1,1,1)]
+ This means your texture reader implementation should provide
+ data to this node that is properly scaled and ready
+ to be consumed as a tangent space normal.
+ If the texture has 8 bits per component, then scale and bias must be
+ adjusted to be (2.0, 2.0, 2.0, 1.0) and (-1, -1, -1, 0) respectively
+ in order to satisfy tangent space requirements.
+ Normal map data is commonly expected to be linearly encoded.
+ However, many image-writing tools automatically set the profile of
+ three-channel, 8-bit images to SRGB. To prevent an unwanted
+ transformation, the sourceColorSpace must also be set to "raw"."""
+ )
+
+ float inputs:displacement = 0.0 (
+ doc = """Displacement in the direction of the normal. """
+ )
+
+ float inputs:occlusion = 1.0 (
+ doc = """Occlusion signal. This provides extra information about the
+ occlusion of different parts of the mesh that this material is applied
+ to. Occlusion only makes sense as a surface-varying signal, and
+ pathtracers will likely choose to ignore it. An occlusion value of 0.0
+ means the surface point is fully occluded by other parts of the surface,
+ and a value of 1.0 means the surface point is completely unoccluded by
+ other parts of the surface. """
+ )
+}
+
+def Shader "UsdUVTexture" (
+ doc = """Texture Node Specification represents a node that can be used to
+ read UV textures, including tiled textures such as Mari UDIM's.
+
+ Reads from a texture file and outputs one or more values. If the texture has
+ 8 bits per component, [0, 255] values will first be converted to floating
+ point in the range [0, 1] and then any transformations (bias, scale)
+ indicated are applied. Otherwise any indicated transformation (bias,
+ scale) is just applied. If a single-channel texture is fed into a
+ UsdUVTexture, the r, g, and b components of the rgb output will repeat the
+ channel's value, while the single 'a' output will be set to 1.0. If a
+ two-channel texture is fed into a UsdUVTexture, the r, g, and b components
+ of the rgb output will repeat the first channel's value, while the single
+ 'a' output will be set to the second channel's value. If a three-channel
+ texture is fed into a UsdUVTexture, the r, g, and b components of the rgb
+ outputs will contain the assigned texture channel's value, while the single
+ 'a' output will be set to 1.0.
+"""
+ sdrMetadata = {
+ token role = "texture"
+ }
+)
+{
+ uniform token info:id = "UsdUVTexture"
+ uniform token info:implementationSource = "sourceAsset"
+ uniform asset info:glslfx:sourceAsset = @./uvTexture.glslfx@
+
+ asset inputs:file = @@ (
+ connectability = "interfaceOnly"
+ doc = """Path to the texture this node uses."""
+ )
+
+ float2 inputs:st = (0.0, 0.0) (
+ doc = """This input provides the texture coordinates. It is usually
+ connected to a (primvar) node that will provide the texture
+ coords."""
+ )
+
+ token inputs:wrapS = "useMetadata" (
+ allowedTokens = ["black", "clamp", "repeat", "mirror", "useMetadata"]
+ connectability = "interfaceOnly"
+ doc = """One of: black, clamp, repeat, mirror, useMetadata."""
+ )
+
+ token inputs:wrapT = "useMetadata" (
+ allowedTokens = ["black", "clamp", "repeat", "mirror", "useMetadata"]
+ connectability = "interfaceOnly"
+ doc = """One of: black, clamp, repeat, mirror, useMetadata."""
+ )
+
+ float4 inputs:fallback = (0.0, 0.0, 0.0, 1.0) (
+ doc = """Fallback value to be used when no texture is connected."""
+ sdrMetadata = {
+ token defaultInput = "1"
+ }
+ )
+
+ float4 inputs:scale = (1.0, 1.0, 1.0, 1.0) (
+ connectability = "interfaceOnly"
+ doc = """Scale to be applied to all components of the texture.
+ value * scale + bias.
+ Please see UsdPreviewSurface.inputs:normal for special
+ considerations for reading normal maps."""
+ )
+
+ float4 inputs:bias = (0.0, 0.0, 0.0, 0.0) (
+ connectability = "interfaceOnly"
+ doc = """Bias to be applied to all components of the texture.
+ value * scale + bias).
+ Please see UsdPreviewSurface.inputs:normal for special
+ considerations for reading normal maps."""
+ )
+
+ token inputs:sourceColorSpace = "auto" (
+ connectability = "interfaceOnly"
+ allowedTokens = ["raw", "sRGB", "auto"]
+ doc = """One of: raw, sRGB, auto. Flag indicating the color
+ space in which the source texture is encoded. If set to sRGB,
+ the texture will be read using the sRGB transfer curve, but
+ not filtered against the sRGB gamut. Please see
+ UsdPreviewSurface.inputs:normal for special considerations for
+ reading normal maps."""
+ )
+
+ float outputs:r (
+ doc = "Outputs the red channel."
+ sdrMetadata = {
+ token swizzle = "x"
+ }
+ )
+
+ float outputs:g (
+ doc = "Outputs the green channel."
+ sdrMetadata = {
+ token swizzle = "y"
+ }
+ )
+
+ float outputs:b (
+ doc = "Outputs the blue channel."
+ sdrMetadata = {
+ token swizzle = "z"
+ }
+ )
+
+ float outputs:a (
+ doc = "Outputs the alpha channel."
+ sdrMetadata = {
+ token swizzle = "w"
+ }
+ )
+
+ float3 outputs:rgb (
+ doc = "Outputs the red, green and blue channels."
+ sdrMetadata = {
+ token swizzle = "xyz"
+ }
+ )
+}
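+
+ Tying this to the normal-map guidance on UsdPreviewSurface.inputs:normal above, a reader for an 8-bit tangent-space normal map might be configured like this (the file path is illustrative):
+
+ ```
+ def Shader "NormalMap"
+ {
+     uniform token info:id = "UsdUVTexture"
+     asset inputs:file = @textures/wall_normal.png@
+     # Remap [0, 1] texels to [-1, 1] tangent-space vectors.
+     float4 inputs:scale = (2.0, 2.0, 2.0, 1.0)
+     float4 inputs:bias = (-1.0, -1.0, -1.0, 0.0)
+     # Normal data is linear; avoid an implicit sRGB transform.
+     token inputs:sourceColorSpace = "raw"
+     float3 outputs:rgb
+ }
+ ```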
+
+class "UsdPrimvarReader" (
+ sdrMetadata = {
+ token role = "primvar"
+ }
+)
+{
+ uniform token info:implementationSource = "sourceAsset"
+ uniform asset info:glslfx:sourceAsset = @./primvarReader.glslfx@
+ string inputs:varname = "" (
+ connectability = "interfaceOnly"
+ doc = """Name of the primvar to be fetched from the geometry."""
+ sdrMetadata = {
+ token primvarProperty = "1"
+ }
+ )
+}
+
+def Shader "UsdPrimvarReader_float" (
+ inherits = </UsdPrimvarReader>
+)
+{
+ uniform token info:id = "UsdPrimvarReader_float"
+
+ float inputs:fallback = 0.0 (
+ doc = """Fallback value to be returned when fetch failed."""
+ sdrMetadata = {
+ token defaultInput = "1"
+ }
+ )
+
+ float outputs:result
+}
+
+def Shader "UsdPrimvarReader_float2" (
+ inherits = </UsdPrimvarReader>
+)
+{
+ uniform token info:id = "UsdPrimvarReader_float2"
+
+ float2 inputs:fallback = (0.0, 0.0) (
+ doc = """Fallback value to be returned when fetch failed."""
+ sdrMetadata = {
+ token defaultInput = "1"
+ }
+ )
+
+ float2 outputs:result
+}
+
+def Shader "UsdPrimvarReader_float3" (
+ inherits = </UsdPrimvarReader>
+)
+{
+ uniform token info:id = "UsdPrimvarReader_float3"
+
+ float3 inputs:fallback = (0.0, 0.0, 0.0) (
+ doc = """Fallback value to be returned when fetch failed."""
+ sdrMetadata = {
+ token defaultInput = "1"
+ }
+ )
+
+ float3 outputs:result
+}
+
+def Shader "UsdPrimvarReader_float4" (
+ inherits = </UsdPrimvarReader>
+)
+{
+ uniform token info:id = "UsdPrimvarReader_float4"
+
+ float4 inputs:fallback = (0.0, 0.0, 0.0, 0.0) (
+ doc = """Fallback value to be returned when fetch failed."""
+ sdrMetadata = {
+ token defaultInput = "1"
+ }
+ )
+
+ float4 outputs:result
+}
+
+def Shader "UsdPrimvarReader_int" (
+ inherits = </UsdPrimvarReader>
+)
+{
+ uniform token info:id = "UsdPrimvarReader_int"
+
+ int inputs:fallback = 0 (
+ doc = """Fallback value to be returned when fetch failed."""
+ sdrMetadata = {
+ token defaultInput = "1"
+ }
+ )
+
+ int outputs:result
+}
+
+def Shader "UsdPrimvarReader_string" (
+ inherits = </UsdPrimvarReader>
+)
+{
+ uniform token info:id = "UsdPrimvarReader_string"
+
+ string inputs:fallback = "" (
+ doc = """Fallback value to be returned when fetch failed."""
+ sdrMetadata = {
+ token defaultInput = "1"
+ }
+ )
+
+ string outputs:result
+}
+
+def Shader "UsdPrimvarReader_normal" (
+ inherits = </UsdPrimvarReader>
+)
+{
+ uniform token info:id = "UsdPrimvarReader_normal"
+
+ normal3f inputs:fallback = (0.0, 0.0, 0.0) (
+ doc = """Fallback value to be returned when fetch failed."""
+ sdrMetadata = {
+ token defaultInput = "1"
+ }
+ )
+
+ normal3f outputs:result
+}
+
+def Shader "UsdPrimvarReader_point" (
+ inherits = </UsdPrimvarReader>
+)
+{
+ uniform token info:id = "UsdPrimvarReader_point"
+
+ point3f inputs:fallback = (0.0, 0.0, 0.0) (
+ doc = """Fallback value to be returned when fetch failed."""
+ sdrMetadata = {
+ token defaultInput = "1"
+ }
+ )
+
+ point3f outputs:result
+}
+
+def Shader "UsdPrimvarReader_vector" (
+ inherits = </UsdPrimvarReader>
+)
+{
+ uniform token info:id = "UsdPrimvarReader_vector"
+
+ vector3f inputs:fallback = (0.0, 0.0, 0.0) (
+ doc = """Fallback value to be returned when fetch failed."""
+ sdrMetadata = {
+ token defaultInput = "1"
+ }
+ )
+
+ vector3f outputs:result
+}
+
+def Shader "UsdPrimvarReader_matrix" (
+ inherits = </UsdPrimvarReader>
+)
+{
+ uniform token info:id = "UsdPrimvarReader_matrix"
+
+ matrix4d inputs:fallback = ( (1, 0, 0, 0), (0, 1, 0, 0), (0, 0, 1, 0), (0, 0, 0, 1) ) (
+ doc = """Fallback value to be returned when fetch failed."""
+ sdrMetadata = {
+ token defaultInput = "1"
+ }
+ )
+
+ matrix4d outputs:result
+}
+
+def Shader "UsdTransform2d" (
+ doc = """Transform 2d represents a node that can be used to
+ transform 2d data (for instance, texture coordinates).
+ The node applies the following transformation :
+ in * scale * rotate + translation"""
+ sdrMetadata = {
+ token role = "math"
+ }
+)
+{
+ uniform token info:id = "UsdTransform2d"
+ uniform token info:implementationSource = "sourceAsset"
+ uniform asset info:glslfx:sourceAsset = @./transform2d.glslfx@
+
+ float2 inputs:in = (0.0, 0.0) (
+ doc = """This input provides the data. It is usually
+ connected to a UsdPrimvarReader_float2 that
+ will provide the data."""
+ )
+
+ float inputs:rotation = 0.0 (
+ connectability = "interfaceOnly"
+ doc = """Counter-clockwise rotation in degrees around the origin to be applied
+ to all components of the data."""
+ )
+
+ float2 inputs:scale = (1.0, 1.0) (
+ connectability = "interfaceOnly"
+ doc = """Scale around the origin to be applied to all components of the data."""
+ )
+
+ float2 inputs:translation = (0.0, 0.0) (
+ connectability = "interfaceOnly"
+ doc = """Translation to be applied to all components of the data."""
+ )
+
+ float2 outputs:result (
+ doc = "Outputs transformed float2 values."
+ )
+}
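+
+ As a sketch of how these node definitions compose in practice (the prim names, the primvar name "st", and the texture path are all illustrative), a small preview material network might look like:
+
+ ```
+ def Material "BrickMaterial"
+ {
+     token outputs:surface.connect = </BrickMaterial/Surface.outputs:surface>
+
+     def Shader "Surface"
+     {
+         uniform token info:id = "UsdPreviewSurface"
+         color3f inputs:diffuseColor.connect = </BrickMaterial/Diffuse.outputs:rgb>
+         float inputs:roughness = 0.6
+         token outputs:surface
+     }
+
+     def Shader "Diffuse"
+     {
+         uniform token info:id = "UsdUVTexture"
+         asset inputs:file = @textures/brick_diffuse.jpg@
+         float2 inputs:st.connect = </BrickMaterial/UV.outputs:result>
+         float3 outputs:rgb
+     }
+
+     def Shader "UV"
+     {
+         uniform token info:id = "UsdTransform2d"
+         float2 inputs:in.connect = </BrickMaterial/Primvar.outputs:result>
+         float2 inputs:scale = (2.0, 2.0)
+         float2 outputs:result
+     }
+
+     def Shader "Primvar"
+     {
+         uniform token info:id = "UsdPrimvarReader_float2"
+         string inputs:varname = "st"
+         float2 outputs:result
+     }
+ }
+ ```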
+-- glslfx version 0.1
+
+//
+// Copyright 2020 Pixar
+//
+// Licensed under the Apache License, Version 2.0 (the "Apache License")
+// with the following modification; you may not use this file except in
+// compliance with the Apache License and the following modification to it:
+// Section 6. Trademarks. is deleted and replaced with:
+//
+// 6. Trademarks. This License does not grant permission to use the trade
+// names, trademarks, service marks, or product names of the Licensor
+// and its affiliates, except as required to comply with Section 4(c) of
+// the License and to reproduce the content of the NOTICE file.
+//
+// You may obtain a copy of the Apache License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the Apache License with the above modification is
+// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the Apache License for the specific
+// language governing permissions and limitations under the Apache License.
+//
+
+-- configuration
+{
+ "techniques": {
+ "default": {
+ }
+ }
+}
+
+-- This file is intentionally empty.
+-- glslfx version 0.1
+
+//
+// Copyright 2018 Pixar
+//
+// Licensed under the Apache License, Version 2.0 (the "Apache License")
+// with the following modification; you may not use this file except in
+// compliance with the Apache License and the following modification to it:
+// Section 6. Trademarks. is deleted and replaced with:
+//
+// 6. Trademarks. This License does not grant permission to use the trade
+// names, trademarks, service marks, or product names of the Licensor
+// and its affiliates, except as required to comply with Section 4(c) of
+// the License and to reproduce the content of the NOTICE file.
+//
+// You may obtain a copy of the Apache License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the Apache License with the above modification is
+// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the Apache License for the specific
+// language governing permissions and limitations under the Apache License.
+//
+
+-- configuration
+{
+ "techniques": {
+ "default": {
+ }
+ }
+}
+
+-- This file is intentionally empty.
+#usda 1.0
+(
+ "WARNING: THIS FILE IS GENERATED BY usdGenSchema. DO NOT EDIT."
+)
+
+class SkelRoot "SkelRoot" (
+ doc = '''Boundable prim type used to identify a scope beneath which
+ skeletally-posed primitives are defined.
+
+ A SkelRoot must be defined at or above a skinned primitive for any skinning
+ behaviors in UsdSkel.
+
+ See the extended "Skel Root Schema" documentation for
+ more information.'''
+)
+{
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeomImageable schema documentation for more detail about how
+        purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
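In authoring terms, this simply means the skinned geometry and its Skeleton should live beneath a SkelRoot prim. A minimal sketch with the OpenUSD Python API (prim paths are illustrative):

```
from pxr import Usd, UsdGeom, UsdSkel

stage = Usd.Stage.CreateInMemory()
UsdSkel.Root.Define(stage, "/Character")           # scope beneath which skinning applies
UsdGeom.Mesh.Define(stage, "/Character/Body")      # a prim that can be skinned
UsdSkel.Skeleton.Define(stage, "/Character/Skel")  # the skeleton, also under the SkelRoot
```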
+class Skeleton "Skeleton" (
+ doc = '''Describes a skeleton.
+
+ See the extended "Skeleton Schema" documentation for
+ more information.
+ '''
+)
+{
+ uniform matrix4d[] bindTransforms (
+ doc = """Specifies the bind-pose transforms of each joint in
+ **world space**, in the ordering imposed by *joints*."""
+ )
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ uniform token[] jointNames (
+ doc = """If authored, provides a unique name per joint. This may be
+ optionally set to provide better names when translating to DCC apps
+ that require unique joint names."""
+ )
+ uniform token[] joints (
+ doc = """An array of path tokens identifying the set of joints that make
+ up the skeleton, and their order. Each token in the array must be valid
+ when parsed as an SdfPath. The parent-child relationships of the
+ corresponding paths determine the parent-child relationships of each
+ joint. It is not required that the name at the end of each path be
+ unique, but rather only that the paths themselves be unique."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeomImageable schema documentation for more detail about how
+        purpose is computed and used."""
+ )
+ uniform matrix4d[] restTransforms (
+ doc = """Specifies the rest-pose transforms of each joint in
+ **local space**, in the ordering imposed by *joints*. This provides
+ fallback values for joint transforms when a Skeleton either has no
+ bound animation source, or when that animation source only contains
+ animation for a subset of a Skeleton's joints."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
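A sketch of authoring these core Skeleton attributes with the OpenUSD Python API (joint names and transforms are placeholders); note that *bindTransforms* are world-space while *restTransforms* are joint-local, and both are ordered by *joints*:

```
from pxr import Usd, UsdSkel, Gf, Vt

stage = Usd.Stage.CreateInMemory()
skel = UsdSkel.Skeleton.Define(stage, "/Character/Skel")

# Joints are path-like tokens; the paths themselves encode the joint hierarchy.
skel.CreateJointsAttr(Vt.TokenArray(["hips", "hips/spine", "hips/spine/head"]))

identity = Gf.Matrix4d(1.0)
skel.CreateBindTransformsAttr(Vt.Matrix4dArray([identity] * 3))  # world space
skel.CreateRestTransformsAttr(Vt.Matrix4dArray([identity] * 3))  # joint-local space
```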
+class SkelAnimation "SkelAnimation" (
+ doc = '''Describes a skel animation, where joint animation is stored in a
+ vectorized form.
+
+ See the extended "Skel Animation"
+ documentation for more information.
+ '''
+)
+{
+ uniform token[] blendShapes (
+ doc = """Array of tokens identifying which blend shapes this
+ animation's data applies to. The tokens for blendShapes correspond to
+ the tokens set in the *skel:blendShapes* binding property of the
+ UsdSkelBindingAPI."""
+ )
+ float[] blendShapeWeights (
+ doc = """Array of weight values for each blend shape. Each weight value
+ is associated with the corresponding blend shape identified within the
+ *blendShapes* token array, and therefore must have the same length as
+ *blendShapes."""
+ )
+ uniform token[] joints (
+ doc = """Array of tokens identifying which joints this animation's
+ data applies to. The tokens for joints correspond to the tokens of
+ Skeleton primitives. The order of the joints as listed here may
+ vary from the order of joints on the Skeleton itself."""
+ )
+ quatf[] rotations (
+ doc = """Joint-local unit quaternion rotations of all affected joints,
+ in 32-bit precision. Array length should match the size of the
+ *joints* attribute."""
+ )
+ half3[] scales (
+ doc = """Joint-local scales of all affected joints, in
+ 16 bit precision. Array length should match the size of the *joints*
+ attribute."""
+ )
+ float3[] translations (
+ doc = """Joint-local translations of all affected joints. Array length
+ should match the size of the *joints* attribute."""
+ )
+}
+
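Because the animation data is vectorized, every array below is parallel to the *joints* (or *blendShapes*) token array. A sketch with placeholder joint names and values:

```
from pxr import Usd, UsdSkel, Gf, Vt

stage = Usd.Stage.CreateInMemory()
anim = UsdSkel.Animation.Define(stage, "/Character/Anim")
anim.CreateJointsAttr(Vt.TokenArray(["hips", "hips/spine"]))

# One element per joint, in the order declared above, authored at time 1.0.
anim.CreateTranslationsAttr().Set(Vt.Vec3fArray([(0, 1, 0), (0, 0.5, 0)]), 1.0)
anim.CreateRotationsAttr().Set(Vt.QuatfArray([Gf.Quatf(1, 0, 0, 0)] * 2), 1.0)
anim.CreateScalesAttr().Set(Vt.Vec3hArray([(1, 1, 1), (1, 1, 1)]), 1.0)
```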
+class "SkelBindingAPI" (
+ doc = '''Provides API for authoring and extracting all the skinning-related
+ data that lives in the "geometry hierarchy" of prims and models that want
+ to be skeletally deformed.
+
+ See the extended "UsdSkelBindingAPI schema"
+ documentation for more about bindings and how they apply in a scene graph.
+ '''
+)
+{
+ matrix4d primvars:skel:geomBindTransform (
+ doc = '''Encodes the bind-time world space transforms of the prim.
+ If the transform is identical for a group of gprims that share a common
+ ancestor, the transform may be authored on the ancestor, to "inherit"
+ down to all the leaf gprims. If this transform is unset, an identity
+ transform is used instead.'''
+ )
+ int[] primvars:skel:jointIndices (
+ doc = """Indices into the *joints* attribute of the closest
+ (in namespace) bound Skeleton that affect each point of a PointBased
+ gprim. The primvar can have either *constant* or *vertex* interpolation.
+ This primvar's *elementSize* will determine how many joint influences
+        apply to each point. Indices must be valid. Null influences should
+ be defined by setting values in jointWeights to zero.
+ See UsdGeomPrimvar for more information on interpolation and
+ elementSize."""
+ )
+ float[] primvars:skel:jointWeights (
+ doc = """Weights for the joints that affect each point of a PointBased
+ gprim. The primvar can have either *constant* or *vertex* interpolation.
+        This primvar's *elementSize* will determine how many joint influences
+ apply to each point. The length, interpolation, and elementSize of
+ *jointWeights* must match that of *jointIndices*. See UsdGeomPrimvar
+ for more information on interpolation and elementSize."""
+ )
+ uniform token primvars:skel:skinningMethod = "classicLinear" (
+ allowedTokens = ["classicLinear", "dualQuaternion"]
+ doc = "The skinningMethod specifies the skinning method for the prim."
+ )
+ rel skel:animationSource (
+ doc = """Animation source to be bound to Skeleton primitives at or
+ beneath the location at which this property is defined.
+ """
+ )
+ uniform token[] skel:blendShapes (
+ doc = """An array of tokens defining the order onto which blend shape
+ weights from an animation source map onto the *skel:blendShapeTargets*
+ rel of a binding site. If authored, the number of elements must be equal
+ to the number of targets in the _blendShapeTargets_ rel. This property
+ is not inherited hierarchically, and is expected to be authored directly
+ on the skinnable primitive to which the blend shapes apply."""
+ )
+ rel skel:blendShapeTargets (
+ doc = """Ordered list of all target blend shapes. This property is not
+ inherited hierarchically, and is expected to be authored directly on
+        the skinnable primitive to which the blend shapes apply."""
+ )
+ uniform token[] skel:joints (
+ doc = """An (optional) array of tokens defining the list of
+ joints to which jointIndices apply. If not defined, jointIndices applies
+ to the ordered list of joints defined in the bound Skeleton's *joints*
+ attribute. If undefined on a primitive, the primitive inherits the
+ value of the nearest ancestor prim, if any."""
+ )
+ rel skel:skeleton (
+ doc = """Skeleton to be bound to this prim and its descendents that
+ possess a mapping and weighting to the joints of the identified
+ Skeleton."""
+ )
+}
+
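A sketch of the binding side with made-up paths and a two-influence rig: *elementSize* groups the flat *jointIndices*/*jointWeights* arrays into per-point runs, and the two primvars must match in length, interpolation, and elementSize:

```
from pxr import Usd, UsdGeom, UsdSkel, Vt

stage = Usd.Stage.CreateInMemory()
mesh = UsdGeom.Mesh.Define(stage, "/Character/Body")
binding = UsdSkel.BindingAPI.Apply(mesh.GetPrim())
binding.CreateSkeletonRel().SetTargets(["/Character/Skel"])

# Two joint influences per point (constant=False, elementSize=2); the weights
# array mirrors the indices array in length and layout.
indices = binding.CreateJointIndicesPrimvar(False, 2)
indices.Set(Vt.IntArray([0, 1, 0, 1]))               # 2 points x 2 influences
weights = binding.CreateJointWeightsPrimvar(False, 2)
weights.Set(Vt.FloatArray([0.75, 0.25, 0.5, 0.5]))
```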
+class BlendShape "BlendShape" (
+ doc = '''Describes a target blend shape, possibly containing inbetween
+ shapes.
+
+ See the extended "Blend Shape Schema
+ documentation for information.
+ '''
+)
+{
+ uniform vector3f[] normalOffsets (
+ doc = """**Required property**. Normal offsets which, when added to the
+        base pose, provide the normals of the target shape."""
+ )
+ uniform vector3f[] offsets (
+ doc = """**Required property**. Position offsets which, when added to the
+        base pose, provide the target shape."""
+ )
+ uniform int[] pointIndices (
+ doc = """**Optional property**. Indices into the original mesh that
+ correspond to the values in *offsets* and of any inbetween shapes. If
+ authored, the number of elements must be equal to the number of elements
+ in the *offsets* array."""
+ )
+}
+
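A sketch of a sparse blend shape with placeholder values: only the points listed in *pointIndices* receive offsets, so the two arrays must have the same length:

```
from pxr import Usd, UsdSkel, Gf, Vt

stage = Usd.Stage.CreateInMemory()
shape = UsdSkel.BlendShape.Define(stage, "/Character/Smile")

# Only base-mesh points 3 and 7 are displaced by this shape.
shape.CreateOffsetsAttr(Vt.Vec3fArray([Gf.Vec3f(0, 0.1, 0), Gf.Vec3f(0, 0.2, 0)]))
shape.CreatePointIndicesAttr(Vt.IntArray([3, 7]))
```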
+# Portions of this file auto-generated by usdGenSchema.
+# Edits will survive regeneration except for comments and
+# changes to types with autoGenerated=true.
+{
+ "Plugins": [
+ {
+ "Info": {
+ "SdfMetadata": {
+ "weight": {
+ "appliesTo": [
+ "attributes"
+ ],
+ "default": 0,
+ "displayGroup": "BlendShape",
+            "documentation": "The weight value at which an inbetween shape is applied.",
+ "type": "float"
+ }
+ },
+ "Types": {
+ "UsdSkelAnimation": {
+ "alias": {
+ "UsdSchemaBase": "SkelAnimation"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdTyped"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdSkelBindingAPI": {
+ "alias": {
+ "UsdSchemaBase": "SkelBindingAPI"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdAPISchemaBase"
+ ],
+ "schemaKind": "singleApplyAPI"
+ },
+ "UsdSkelBlendShape": {
+ "alias": {
+ "UsdSchemaBase": "BlendShape"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdTyped"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdSkelRoot": {
+ "alias": {
+ "UsdSchemaBase": "SkelRoot"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomBoundable"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ },
+ "UsdSkelSkeleton": {
+ "alias": {
+ "UsdSchemaBase": "Skeleton"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomBoundable"
+ ],
+ "implementsComputeExtent": true,
+ "schemaKind": "concreteTyped"
+ }
+ }
+ },
+ "LibraryPath": "",
+ "Name": "usdSkel",
+ "ResourcePath": "resources",
+ "Root": "..",
+ "Type": "library"
+ }
+ ]
+}
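
This plugInfo block is what registers the generated usdSkel types (and the `weight` metadata field) with USD's plugin system. Assuming the built plugin ends up on the plugin search path, the registration can be inspected from Python; a small sketch:

```
from pxr import Plug

plugin = Plug.Registry().GetPluginWithName("usdSkel")
if plugin:
    # resourcePath points at the "resources" directory declared above, which is
    # where generatedSchema.usda and this plugInfo.json are installed.
    print(plugin.name, plugin.resourcePath)
```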
+#usda 1.0
+(
+ """ This file contains a schema for supporting skeletal animations in USD.
+ """
+ subLayers = [
+ @usdGeom/schema.usda@
+ ]
+)
+
+over "GLOBAL" (
+ customData = {
+ string libraryName = "usdSkel"
+ string libraryPath = "pxr/usd/usdSkel"
+
+ dictionary libraryTokens = {
+ dictionary weight = {
+ string doc = """UsdSkelInbetweenShape - The weight location at
+ which the inbetween shape applies."""
+ }
+ }
+ }
+) {
+}
+
+
+class SkelRoot "SkelRoot" (
+    inherits = </Boundable>
+ doc = """Boundable prim type used to identify a scope beneath which
+ skeletally-posed primitives are defined.
+
+ A SkelRoot must be defined at or above a skinned primitive for any skinning
+ behaviors in UsdSkel.
+
+ See the extended \\ref UsdSkel_SkelRoot "Skel Root Schema" documentation for
+ more information."""
+ customData = {
+ string className = "Root"
+
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = true
+ }
+ }
+) {
+}
+
+
+class Skeleton "Skeleton" (
+    inherits = </Boundable>
+ doc = """Describes a skeleton.
+
+ See the extended \\ref UsdSkel_Skeleton "Skeleton Schema" documentation for
+ more information.
+ """
+ customData = {
+ dictionary extraPlugInfo = {
+ bool implementsComputeExtent = true
+ }
+
+ string extraIncludes = """
+#include "pxr/usd/usdSkel/topology.h" """
+ }
+) {
+ uniform token[] joints (
+ doc = """An array of path tokens identifying the set of joints that make
+ up the skeleton, and their order. Each token in the array must be valid
+ when parsed as an SdfPath. The parent-child relationships of the
+ corresponding paths determine the parent-child relationships of each
+ joint. It is not required that the name at the end of each path be
+ unique, but rather only that the paths themselves be unique."""
+ )
+
+ uniform token[] jointNames (
+ doc = """If authored, provides a unique name per joint. This may be
+ optionally set to provide better names when translating to DCC apps
+ that require unique joint names."""
+ )
+
+ uniform matrix4d[] bindTransforms (
+ doc = """Specifies the bind-pose transforms of each joint in
+ **world space**, in the ordering imposed by *joints*."""
+ )
+ uniform matrix4d[] restTransforms (
+ doc = """Specifies the rest-pose transforms of each joint in
+ **local space**, in the ordering imposed by *joints*. This provides
+ fallback values for joint transforms when a Skeleton either has no
+ bound animation source, or when that animation source only contains
+ animation for a subset of a Skeleton's joints."""
+ )
+}
+
+
+class SkelAnimation "SkelAnimation" (
+    inherits = </Typed>
+ doc = """Describes a skel animation, where joint animation is stored in a
+ vectorized form.
+
+ See the extended \\ref UsdSkel_SkelAnimation "Skel Animation"
+ documentation for more information.
+ """
+ customData = {
+ string className = "Animation"
+ }
+) {
+ uniform token[] joints (
+ doc = """Array of tokens identifying which joints this animation's
+ data applies to. The tokens for joints correspond to the tokens of
+ Skeleton primitives. The order of the joints as listed here may
+ vary from the order of joints on the Skeleton itself."""
+ )
+
+ float3[] translations (
+ doc = """Joint-local translations of all affected joints. Array length
+ should match the size of the *joints* attribute."""
+ )
+
+ quatf[] rotations (
+ doc = """Joint-local unit quaternion rotations of all affected joints,
+ in 32-bit precision. Array length should match the size of the
+ *joints* attribute."""
+ )
+
+ half3[] scales (
+ doc = """Joint-local scales of all affected joints, in
+ 16 bit precision. Array length should match the size of the *joints*
+ attribute."""
+ )
+
+ uniform token[] blendShapes (
+ doc = """Array of tokens identifying which blend shapes this
+ animation's data applies to. The tokens for blendShapes correspond to
+ the tokens set in the *skel:blendShapes* binding property of the
+ UsdSkelBindingAPI."""
+ )
+
+ float[] blendShapeWeights (
+ doc = """Array of weight values for each blend shape. Each weight value
+ is associated with the corresponding blend shape identified within the
+ *blendShapes* token array, and therefore must have the same length as
+ *blendShapes."""
+ )
+}
+
+class "SkelBindingAPI" (
+    inherits = </APISchemaBase>
+ doc = """Provides API for authoring and extracting all the skinning-related
+ data that lives in the "geometry hierarchy" of prims and models that want
+ to be skeletally deformed.
+
+ See the extended \\ref UsdSkel_BindingAPI "UsdSkelBindingAPI schema"
+ documentation for more about bindings and how they apply in a scene graph.
+ """
+ customData = {
+ string className = "BindingAPI"
+ string extraIncludes = """
+#include "pxr/base/tf/span.h"
+#include "pxr/usd/usdGeom/primvar.h"
+#include "pxr/usd/usdSkel/skeleton.h" """
+ }
+)
+{
+ rel skel:animationSource (
+ customData = {
+ string apiName = "animationSource"
+ }
+ doc = """Animation source to be bound to Skeleton primitives at or
+ beneath the location at which this property is defined.
+ """
+ )
+
+ rel skel:skeleton (
+ customData = {
+ string apiName = "skeleton"
+ }
+ doc = """Skeleton to be bound to this prim and its descendents that
+ possess a mapping and weighting to the joints of the identified
+ Skeleton."""
+ )
+
+ uniform token primvars:skel:skinningMethod = "classicLinear" (
+ customData = {
+ string apiName = "skinningMethod"
+ }
+ allowedTokens = ["classicLinear", "dualQuaternion"]
+ doc = """The skinningMethod specifies the skinning method for the prim."""
+ )
+
+ matrix4d primvars:skel:geomBindTransform (
+ customData = {
+ string apiName = "geomBindTransform"
+ }
+ doc = """Encodes the bind-time world space transforms of the prim.
+ If the transform is identical for a group of gprims that share a common
+ ancestor, the transform may be authored on the ancestor, to "inherit"
+ down to all the leaf gprims. If this transform is unset, an identity
+ transform is used instead."""
+ )
+
+ uniform token[] skel:joints (
+ customData = {
+ string apiName = "joints"
+ }
+ doc = """An (optional) array of tokens defining the list of
+ joints to which jointIndices apply. If not defined, jointIndices applies
+ to the ordered list of joints defined in the bound Skeleton's *joints*
+ attribute. If undefined on a primitive, the primitive inherits the
+ value of the nearest ancestor prim, if any."""
+ )
+
+ int[] primvars:skel:jointIndices (
+ customData = {
+ string apiName = "jointIndices"
+ }
+ doc = """Indices into the *joints* attribute of the closest
+ (in namespace) bound Skeleton that affect each point of a PointBased
+ gprim. The primvar can have either *constant* or *vertex* interpolation.
+ This primvar's *elementSize* will determine how many joint influences
+        apply to each point. Indices must be valid. Null influences should
+ be defined by setting values in jointWeights to zero.
+ See UsdGeomPrimvar for more information on interpolation and
+ elementSize."""
+ )
+
+ float[] primvars:skel:jointWeights (
+ customData = {
+ string apiName = "jointWeights"
+ }
+ doc = """Weights for the joints that affect each point of a PointBased
+ gprim. The primvar can have either *constant* or *vertex* interpolation.
+        This primvar's *elementSize* will determine how many joint influences
+ apply to each point. The length, interpolation, and elementSize of
+ *jointWeights* must match that of *jointIndices*. See UsdGeomPrimvar
+ for more information on interpolation and elementSize."""
+ )
+
+ uniform token[] skel:blendShapes (
+ customData = {
+ string apiName = "blendShapes"
+ }
+ doc = """An array of tokens defining the order onto which blend shape
+ weights from an animation source map onto the *skel:blendShapeTargets*
+ rel of a binding site. If authored, the number of elements must be equal
+ to the number of targets in the _blendShapeTargets_ rel. This property
+ is not inherited hierarchically, and is expected to be authored directly
+ on the skinnable primitive to which the blend shapes apply."""
+ )
+ rel skel:blendShapeTargets (
+ customData = {
+ string apiName= "blendShapeTargets"
+ }
+ doc = """Ordered list of all target blend shapes. This property is not
+ inherited hierarchically, and is expected to be authored directly on
+ the skinnable primitive to which the the blend shapes apply."""
+ )
+}
+
+
+class BlendShape "BlendShape" (
+    inherits = </Typed>
+ doc = """Describes a target blend shape, possibly containing inbetween
+ shapes.
+
+    See the extended \\ref UsdSkel_BlendShape "Blend Shape Schema"
+    documentation for more information.
+ """
+ customData = {
+ string extraIncludes = """
+#include "pxr/base/tf/span.h"
+#include "pxr/usd/usdSkel/inbetweenShape.h" """
+ }
+)
+{
+ uniform vector3f[] offsets (
+ doc = """**Required property**. Position offsets which, when added to the
+        base pose, provide the target shape."""
+ )
+
+ uniform vector3f[] normalOffsets (
+ doc = """**Required property**. Normal offsets which, when added to the
+        base pose, provide the normals of the target shape."""
+ )
+
+ uniform int[] pointIndices (
+ doc = """**Optional property**. Indices into the original mesh that
+ correspond to the values in *offsets* and of any inbetween shapes. If
+ authored, the number of elements must be equal to the number of elements
+ in the *offsets* array."""
+ )
+}
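
The `weight` token declared in the library tokens at the top of this layer is what positions an *inbetween* shape partway between the rest pose and the primary blend shape. A sketch using the OpenUSD Python API (names and offsets are placeholders):

```
from pxr import Usd, UsdSkel, Gf, Vt

stage = Usd.Stage.CreateInMemory()
shape = UsdSkel.BlendShape.Define(stage, "/Character/Smile")
shape.CreateOffsetsAttr(Vt.Vec3fArray([Gf.Vec3f(0, 0.2, 0)]))

# An inbetween shape authored at weight 0.5, i.e. halfway toward the full shape.
half = shape.CreateInbetween("halfSmile")
half.SetWeight(0.5)
half.SetOffsets(Vt.Vec3fArray([Gf.Vec3f(0, 0.08, 0)]))
```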
+#usda 1.0
+(
+ "WARNING: THIS FILE IS GENERATED BY usdGenSchema. DO NOT EDIT."
+)
+
+class Volume "Volume" (
+ doc = """A renderable volume primitive. A volume is made up of any number
+ of FieldBase primitives bound together in this volume. Each
+ FieldBase primitive is specified as a relationship with a
+ namespace prefix of \"field\".
+
+ The relationship name is used by the renderer to associate
+ individual fields with the named input parameters on the volume
+ shader. Using this indirect approach to connecting fields to
+ shader parameters (rather than using the field prim's name)
+ allows a single field to be reused for different shader inputs, or
+ to be used as different shader parameters when rendering different
+ Volumes. This means that the name of the field prim is not
+ relevant to its contribution to the volume prims which refer to
+ it. Nor does the field prim's location in the scene graph have
+ any relevance, and Volumes may refer to fields anywhere in the
+ scene graph. **However**, unless Field prims need to be shared
+ by multiple Volumes, a Volume's Field prims should be located
+ under the Volume in namespace, for enhanced organization."""
+)
+{
+ uniform bool doubleSided = 0 (
+ doc = """Although some renderers treat all parametric or polygonal
+ surfaces as if they were effectively laminae with outward-facing
+ normals on both sides, some renderers derive significant optimizations
+ by considering these surfaces to have only a single outward side,
+ typically determined by control-point winding order and/or
+ orientation. By doing so they can perform \"backface culling\" to
+ avoid drawing the many polygons of most closed surfaces that face away
+ from the viewer.
+
+ However, it is often advantageous to model thin objects such as paper
+ and cloth as single, open surfaces that must be viewable from both
+ sides, always. Setting a gprim's doubleSided attribute to
+ \\c true instructs all renderers to disable optimizations such as
+ backface culling for the gprim, and attempt (not all renderers are able
+ to do so, but the USD reference GL renderer always will) to provide
+ forward-facing normals on each side of the surface for lighting
+ calculations."""
+ )
+ float3[] extent (
+ doc = """Extent is a three dimensional range measuring the geometric
+ extent of the authored gprim in its own local space (i.e. its own
+ transform not applied), without accounting for any shader-induced
+ displacement. If __any__ extent value has been authored for a given
+ Boundable, then it should be authored at every timeSample at which
+ geometry-affecting properties are authored, to ensure correct
+ evaluation via ComputeExtent(). If __no__ extent value has been
+ authored, then ComputeExtent() will call the Boundable's registered
+ ComputeExtentFunction(), which may be expensive, which is why we
+ strongly encourage proper authoring of extent.
+ \\sa ComputeExtent()
+ \\sa \\ref UsdGeom_Boundable_Extent.
+
+ An authored extent on a prim which has children is expected to include
+ the extent of all children, as they will be pruned from BBox computation
+ during traversal."""
+ )
+ uniform token orientation = "rightHanded" (
+ allowedTokens = ["rightHanded", "leftHanded"]
+ doc = """Orientation specifies whether the gprim's surface normal
+ should be computed using the right hand rule, or the left hand rule.
+        Please see the UsdGeom documentation on winding order for a deeper explanation and
+ generalization of orientation to composed scenes with transformation
+ hierarchies."""
+ )
+ color3f[] primvars:displayColor (
+ doc = '''It is useful to have an "official" colorSet that can be used
+ as a display or modeling color, even in the absence of any specified
+ shader for a gprim. DisplayColor serves this role; because it is a
+ UsdGeomPrimvar, it can also be used as a gprim override for any shader
+ that consumes a displayColor parameter.'''
+ )
+ float[] primvars:displayOpacity (
+ doc = """Companion to displayColor that specifies opacity, broken
+ out as an independent attribute rather than an rgba color, both so that
+ each can be independently overridden, and because shaders rarely consume
+ rgba parameters."""
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeomImageable schema documentation for more detail about how
+        purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
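A sketch of the field-binding pattern described above, with a made-up OpenVDB file path: it is the relationship name (`field:density` here), not the field prim's name, that the volume shader sees as its input:

```
from pxr import Usd, UsdVol, Sdf

stage = Usd.Stage.CreateInMemory()
volume = UsdVol.Volume.Define(stage, "/Explosion")

# The field prim lives under the volume purely for organization.
density = UsdVol.OpenVDBAsset.Define(stage, "/Explosion/DensityField")
density.CreateFilePathAttr(Sdf.AssetPath("./explosion.vdb"))
density.CreateFieldNameAttr("density")

# Authors relationship "field:density" on the volume, targeting the field prim.
volume.CreateFieldRelationship("density", density.GetPath())
```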
+class "FieldBase" (
+ doc = "Base class for field primitives."
+)
+{
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeomImageable schema documentation for more detail about how
+        purpose is computed and used."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class "FieldAsset" (
+ doc = "Base class for field primitives defined by an external file."
+)
+{
+ token fieldDataType (
+ doc = """Token which is used to indicate the data type of an
+ individual field. Authors use this to tell consumers more
+ about the field without opening the file on disk. The list of
+ allowed tokens is specified with the specific asset type.
+ A missing value is considered an error."""
+ )
+ int fieldIndex (
+ doc = """A file can contain multiple fields with the same
+ name. This optional attribute is an index used to
+ disambiguate between these multiple fields with the same
+ name."""
+ )
+ token fieldName (
+ doc = """Name of an individual field within the file specified by
+ the filePath attribute."""
+ )
+ asset filePath (
+ doc = """An asset path attribute that points to a file on disk.
+ For each supported file format, a separate FieldAsset
+ subclass is required.
+
+ This attribute's value can be animated over time, as most
+ volume asset formats represent just a single timeSample of
+ a volume. However, it does not, at this time, support
+ any pattern substitutions like \"$F\". """
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeomImageable schema documentation for more detail about how
+        purpose is computed and used."""
+ )
+ token vectorDataRoleHint = "None" (
+ allowedTokens = ["None", "Point", "Normal", "Vector", "Color"]
+ doc = """Optional token which is used to indicate the role of a vector
+ valued field. This can drive the data type in which fields
+ are made available in a renderer or whether the vector values
+ are to be transformed."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class Field3DAsset "Field3DAsset" (
+ doc = """Field3D field primitive. The FieldAsset filePath attribute must
+ specify a file in the Field3D format on disk."""
+)
+{
+ token fieldDataType (
+ allowedTokens = ["half", "float", "double", "half3", "float3", "double3"]
+ doc = """Token which is used to indicate the data type of an
+ individual field. Authors use this to tell consumers more
+ about the field without opening the file on disk. The list of
+ allowed tokens reflects the available choices for Field3d
+ volumes."""
+ )
+ int fieldIndex (
+ doc = """A file can contain multiple fields with the same
+ name. This optional attribute is an index used to
+ disambiguate between these multiple fields with the same
+ name."""
+ )
+ token fieldName (
+ doc = """Name of an individual field within the file specified by
+ the filePath attribute."""
+ )
+ token fieldPurpose (
+ doc = """Optional token which can be used to indicate the purpose or
+ grouping of an individual field. Clients which consume Field3D
+ files should treat this as the Field3D field name."""
+ )
+ asset filePath (
+ doc = """An asset path attribute that points to a file on disk.
+ For each supported file format, a separate FieldAsset
+ subclass is required.
+
+ This attribute's value can be animated over time, as most
+ volume asset formats represent just a single timeSample of
+ a volume. However, it does not, at this time, support
+ any pattern substitutions like \"$F\". """
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeomImageable schema documentation for more detail about how
+        purpose is computed and used."""
+ )
+ token vectorDataRoleHint = "None" (
+ allowedTokens = ["None", "Point", "Normal", "Vector", "Color"]
+ doc = """Optional token which is used to indicate the role of a vector
+ valued field. This can drive the data type in which fields
+ are made available in a renderer or whether the vector values
+ are to be transformed."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+class OpenVDBAsset "OpenVDBAsset" (
+ doc = """OpenVDB field primitive. The FieldAsset filePath attribute must
+ specify a file in the OpenVDB format on disk."""
+)
+{
+ token fieldClass (
+ allowedTokens = ["levelSet", "fogVolume", "staggered", "unknown"]
+ doc = """Optional token which can be used to indicate the class of
+ an individual grid. This is a mapping to openvdb::GridClass
+ where the values are GRID_LEVEL_SET, GRID_FOG_VOLUME,
+ GRID_STAGGERED, and GRID_UNKNOWN."""
+ )
+ token fieldDataType (
+ allowedTokens = ["half", "float", "double", "int", "uint", "int64", "half2", "float2", "double2", "int2", "half3", "float3", "double3", "int3", "matrix3d", "matrix4d", "quatd", "bool", "mask", "string"]
+ doc = """Token which is used to indicate the data type of an
+ individual field. Authors use this to tell consumers more
+ about the field without opening the file on disk. The list of
+ allowed tokens reflects the available choices for OpenVDB
+ volumes."""
+ )
+ int fieldIndex (
+ doc = """A file can contain multiple fields with the same
+ name. This optional attribute is an index used to
+ disambiguate between these multiple fields with the same
+ name."""
+ )
+ token fieldName (
+ doc = """Name of an individual field within the file specified by
+ the filePath attribute."""
+ )
+ asset filePath (
+ doc = """An asset path attribute that points to a file on disk.
+ For each supported file format, a separate FieldAsset
+ subclass is required.
+
+ This attribute's value can be animated over time, as most
+ volume asset formats represent just a single timeSample of
+ a volume. However, it does not, at this time, support
+ any pattern substitutions like \"$F\". """
+ )
+ rel proxyPrim (
+ doc = '''The proxyPrim relationship allows us to link a
+ prim whose purpose is "render" to its (single target)
+ purpose="proxy" prim. This is entirely optional, but can be
+ useful in several scenarios:
+
+ - In a pipeline that does pruning (for complexity management)
+ by deactivating prims composed from asset references, when we
+ deactivate a purpose="render" prim, we will be able to discover
+ and additionally deactivate its associated purpose="proxy" prim,
+ so that preview renders reflect the pruning accurately.
+
+ - DCC importers may be able to make more aggressive optimizations
+ for interactive processing and display if they can discover the proxy
+ for a given render prim.
+
+ - With a little more work, a Hydra-based application will be able
+ to map a picked proxy prim back to its render geometry for selection.
+
+ \\note It is only valid to author the proxyPrim relationship on
+ prims whose purpose is "render".'''
+ )
+ uniform token purpose = "default" (
+ allowedTokens = ["default", "render", "proxy", "guide"]
+ doc = """Purpose is a classification of geometry into categories that
+ can each be independently included or excluded from traversals of prims
+ on a stage, such as rendering or bounding-box computation traversals.
+
+        See the UsdGeomImageable schema documentation for more detail about how
+        purpose is computed and used."""
+ )
+ token vectorDataRoleHint = "None" (
+ allowedTokens = ["None", "Point", "Normal", "Vector", "Color"]
+ doc = """Optional token which is used to indicate the role of a vector
+ valued field. This can drive the data type in which fields
+ are made available in a renderer or whether the vector values
+ are to be transformed."""
+ )
+ token visibility = "inherited" (
+ allowedTokens = ["inherited", "invisible"]
+ doc = '''Visibility is meant to be the simplest form of "pruning"
+ visibility that is supported by most DCC apps. Visibility is
+ animatable, allowing a sub-tree of geometry to be present for some
+ segment of a shot, and absent from others; unlike the action of
+ deactivating geometry prims, invisible geometry is still
+ available for inspection, for positioning, for defining volumes, etc.'''
+ )
+ uniform token[] xformOpOrder (
+ doc = """Encodes the sequence of transformation operations in the
+ order in which they should be pushed onto a transform stack while
+ visiting a UsdStage's prims in a graph traversal that will effect
+ the desired positioning for this prim and its descendant prims.
+
+ You should rarely, if ever, need to manipulate this attribute directly.
+ It is managed by the AddXformOp(), SetResetXformStack(), and
+ SetXformOpOrder(), and consulted by GetOrderedXformOps() and
+ GetLocalTransformation()."""
+ )
+}
+
+# Portions of this file auto-generated by usdGenSchema.
+# Edits will survive regeneration except for comments and
+# changes to types with autoGenerated=true.
+{
+ "Plugins": [
+ {
+ "Info": {
+ "Types": {
+ "UsdVolField3DAsset": {
+ "alias": {
+ "UsdSchemaBase": "Field3DAsset"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdVolFieldAsset"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdVolFieldAsset": {
+ "alias": {
+ "UsdSchemaBase": "FieldAsset"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdVolFieldBase"
+ ],
+ "schemaKind": "abstractTyped"
+ },
+ "UsdVolFieldBase": {
+ "alias": {
+ "UsdSchemaBase": "FieldBase"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomXformable"
+ ],
+ "schemaKind": "abstractTyped"
+ },
+ "UsdVolOpenVDBAsset": {
+ "alias": {
+ "UsdSchemaBase": "OpenVDBAsset"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdVolFieldAsset"
+ ],
+ "schemaKind": "concreteTyped"
+ },
+ "UsdVolVolume": {
+ "alias": {
+ "UsdSchemaBase": "Volume"
+ },
+ "autoGenerated": true,
+ "bases": [
+ "UsdGeomGprim"
+ ],
+ "schemaKind": "concreteTyped"
+ }
+ }
+ },
+ "LibraryPath": "",
+ "Name": "usdVol",
+ "ResourcePath": "resources",
+ "Root": "..",
+ "Type": "library"
+ }
+ ]
+}
+#usda 1.0
+
+(
+ subLayers = [
+ @usdGeom/schema.usda@
+ ]
+)
+
+over "GLOBAL" (
+ customData = {
+ string libraryName = "usdVol"
+ string libraryPath = "pxr/usd/usdVol"
+ dictionary libraryTokens = {
+ dictionary field = {
+ string doc = """This is the namespace prefix used to
+ specify the fields that make up a volume primitive."""
+ }
+ }
+ }
+)
+{
+}
+
+class Volume "Volume" (
+    inherits = </Gprim>
+ doc = """A renderable volume primitive. A volume is made up of any number
+ of FieldBase primitives bound together in this volume. Each
+ FieldBase primitive is specified as a relationship with a
+ namespace prefix of "field".
+
+ The relationship name is used by the renderer to associate
+ individual fields with the named input parameters on the volume
+ shader. Using this indirect approach to connecting fields to
+ shader parameters (rather than using the field prim's name)
+ allows a single field to be reused for different shader inputs, or
+ to be used as different shader parameters when rendering different
+ Volumes. This means that the name of the field prim is not
+ relevant to its contribution to the volume prims which refer to
+ it. Nor does the field prim's location in the scene graph have
+ any relevance, and Volumes may refer to fields anywhere in the
+ scene graph. **However**, unless Field prims need to be shared
+ by multiple Volumes, a Volume's Field prims should be located
+ under the Volume in namespace, for enhanced organization."""
+)
+{
+}
+
+class "FieldBase" (
+    inherits = </Xformable>
+ doc = """Base class for field primitives."""
+)
+{
+}
+
+class "FieldAsset" (
+ doc = "Base class for field primitives defined by an external file."
+    inherits = </FieldBase>
+)
+{
+ asset filePath (
+ doc = """An asset path attribute that points to a file on disk.
+ For each supported file format, a separate FieldAsset
+ subclass is required.
+
+ This attribute's value can be animated over time, as most
+ volume asset formats represent just a single timeSample of
+ a volume. However, it does not, at this time, support
+ any pattern substitutions like \"$F\". """
+ )
+ token fieldName (
+ doc = """Name of an individual field within the file specified by
+ the filePath attribute."""
+ )
+ int fieldIndex (
+ doc = """A file can contain multiple fields with the same
+ name. This optional attribute is an index used to
+ disambiguate between these multiple fields with the same
+ name."""
+ )
+ token fieldDataType (
+ doc = """Token which is used to indicate the data type of an
+ individual field. Authors use this to tell consumers more
+ about the field without opening the file on disk. The list of
+ allowed tokens is specified with the specific asset type.
+ A missing value is considered an error."""
+ )
+ token vectorDataRoleHint = "None" (
+ allowedTokens = ["None", "Point", "Normal", "Vector", "Color"]
+ doc = """Optional token which is used to indicate the role of a vector
+ valued field. This can drive the data type in which fields
+ are made available in a renderer or whether the vector values
+ are to be transformed."""
+ )
+}
+
+class Field3DAsset "Field3DAsset" (
+ doc = """Field3D field primitive. The FieldAsset filePath attribute must
+ specify a file in the Field3D format on disk."""
+    inherits = </FieldAsset>
+)
+{
+ token fieldDataType (
+ allowedTokens = ["half", "float", "double",
+ "half3", "float3", "double3"]
+ doc = """Token which is used to indicate the data type of an
+ individual field. Authors use this to tell consumers more
+ about the field without opening the file on disk. The list of
+ allowed tokens reflects the available choices for Field3d
+ volumes."""
+ )
+ token fieldPurpose (
+ doc = """Optional token which can be used to indicate the purpose or
+ grouping of an individual field. Clients which consume Field3D
+ files should treat this as the Field3D field \\em name."""
+ )
+}
+
+class OpenVDBAsset "OpenVDBAsset" (
+ doc = """OpenVDB field primitive. The FieldAsset filePath attribute must
+ specify a file in the OpenVDB format on disk."""
+    inherits = </FieldAsset>
+)
+{
+ token fieldDataType (
+ allowedTokens = ["half", "float", "double", "int", "uint", "int64",
+ "half2", "float2", "double2", "int2",
+ "half3", "float3", "double3", "int3",
+ "matrix3d", "matrix4d", "quatd",
+ "bool", "mask", "string"]
+ doc = """Token which is used to indicate the data type of an
+ individual field. Authors use this to tell consumers more
+ about the field without opening the file on disk. The list of
+ allowed tokens reflects the available choices for OpenVDB
+ volumes."""
+ )
+ token fieldClass (
+ allowedTokens = ["levelSet", "fogVolume", "staggered", "unknown"]
+ doc = """Optional token which can be used to indicate the class of
+ an individual grid. This is a mapping to openvdb::GridClass
+ where the values are GRID_LEVEL_SET, GRID_FOG_VOLUME,
+ GRID_STAGGERED, and GRID_UNKNOWN."""
+ )
+}
+
diff --git a/usd-wasm/src/bindings-debug/emHdBindings.js b/usd-wasm/src/bindings-debug/emHdBindings.js
new file mode 100644
index 0000000..67627b7
--- /dev/null
+++ b/usd-wasm/src/bindings-debug/emHdBindings.js
@@ -0,0 +1,12965 @@
+var getUsdModule = ((args) => {
+ var _scriptDir =
+ typeof document !== "undefined" && document.currentScript
+ ? document.currentScript.src
+ : undefined;
+ if (typeof __filename !== "undefined") _scriptDir = _scriptDir || __filename;
+ return function (moduleArg = {
+ // module overrides can be supplied here
+ locateFile: (path, prefix) => {
+ if (!prefix && _scriptDir) prefix = _scriptDir.substr(0, _scriptDir.lastIndexOf('/') + 1);
+ return prefix + path;
+ },
+ ...args
+}) {
+ // Support for growable heap + pthreads, where the buffer may change, so JS views
+ // must be updated.
+ function GROWABLE_HEAP_I8() {
+ if (wasmMemory.buffer != HEAP8.buffer) {
+ updateMemoryViews();
+ }
+ return HEAP8;
+ }
+ function GROWABLE_HEAP_U8() {
+ if (wasmMemory.buffer != HEAP8.buffer) {
+ updateMemoryViews();
+ }
+ return HEAPU8;
+ }
+ function GROWABLE_HEAP_I16() {
+ if (wasmMemory.buffer != HEAP8.buffer) {
+ updateMemoryViews();
+ }
+ return HEAP16;
+ }
+ function GROWABLE_HEAP_U16() {
+ if (wasmMemory.buffer != HEAP8.buffer) {
+ updateMemoryViews();
+ }
+ return HEAPU16;
+ }
+ function GROWABLE_HEAP_I32() {
+ if (wasmMemory.buffer != HEAP8.buffer) {
+ updateMemoryViews();
+ }
+ return HEAP32;
+ }
+ function GROWABLE_HEAP_U32() {
+ if (wasmMemory.buffer != HEAP8.buffer) {
+ updateMemoryViews();
+ }
+ return HEAPU32;
+ }
+ function GROWABLE_HEAP_F32() {
+ if (wasmMemory.buffer != HEAP8.buffer) {
+ updateMemoryViews();
+ }
+ return HEAPF32;
+ }
+ function GROWABLE_HEAP_F64() {
+ if (wasmMemory.buffer != HEAP8.buffer) {
+ updateMemoryViews();
+ }
+ return HEAPF64;
+ }
+
+ var Module = moduleArg;
+
+ var readyPromiseResolve, readyPromiseReject;
+
+ Module["ready"] = new Promise((resolve, reject) => {
+ readyPromiseResolve = resolve;
+ readyPromiseReject = reject;
+ });
+
+ if (!Module.expectedDataFileDownloads) {
+ Module.expectedDataFileDownloads = 0;
+ }
+
+ Module.expectedDataFileDownloads++;
+
+ (function () {
+ if (Module["ENVIRONMENT_IS_PTHREAD"] || Module["$ww"]) return;
+ var loadPackage = function (metadata) {
+ var PACKAGE_PATH = "";
+ if (typeof window === "object") {
+ PACKAGE_PATH = window["encodeURIComponent"](
+ window.location.pathname
+ .toString()
+ .substring(
+ 0,
+ window.location.pathname.toString().lastIndexOf("/"),
+ ) + "/",
+ );
+ } else if (
+ typeof process === "undefined" &&
+ typeof location !== "undefined"
+ ) {
+ PACKAGE_PATH = encodeURIComponent(
+ location.pathname
+ .toString()
+ .substring(0, location.pathname.toString().lastIndexOf("/")) +
+ "/",
+ );
+ }
+ var PACKAGE_NAME = "emHdBindings.data";
+ var REMOTE_PACKAGE_BASE = "emHdBindings.data";
+ if (
+ typeof Module["locateFilePackage"] === "function" &&
+ !Module["locateFile"]
+ ) {
+ Module["locateFile"] = Module["locateFilePackage"];
+ err(
+ "warning: you defined Module.locateFilePackage, that has been renamed to Module.locateFile (using your locateFilePackage for now)",
+ );
+ }
+ var REMOTE_PACKAGE_NAME = Module["locateFile"]
+ ? Module["locateFile"](REMOTE_PACKAGE_BASE, "")
+ : REMOTE_PACKAGE_BASE;
+ var REMOTE_PACKAGE_SIZE = metadata["remote_package_size"];
+ function fetchRemotePackage(
+ packageName,
+ packageSize,
+ callback,
+ errback,
+ ) {
+ if (
+ typeof process === "object" &&
+ typeof process.versions === "object" &&
+ typeof process.versions.node === "string"
+ ) {
+ require("fs").readFile(packageName, function (err, contents) {
+ if (err) {
+ errback(err);
+ } else {
+ callback(contents.buffer);
+ }
+ });
+ return;
+ }
+ var xhr = new XMLHttpRequest();
+ xhr.open("GET", packageName, true);
+ xhr.responseType = "arraybuffer";
+ xhr.onprogress = function (event) {
+ var url = packageName;
+ var size = packageSize;
+ if (event.total) size = event.total;
+ if (event.loaded) {
+ if (!xhr.addedTotal) {
+ xhr.addedTotal = true;
+ if (!Module.dataFileDownloads) Module.dataFileDownloads = {};
+ Module.dataFileDownloads[url] = {
+ loaded: event.loaded,
+ total: size,
+ };
+ } else {
+ Module.dataFileDownloads[url].loaded = event.loaded;
+ }
+ var total = 0;
+ var loaded = 0;
+ var num = 0;
+ for (var download in Module.dataFileDownloads) {
+ var data = Module.dataFileDownloads[download];
+ total += data.total;
+ loaded += data.loaded;
+ num++;
+ }
+ total = Math.ceil(
+ (total * Module.expectedDataFileDownloads) / num,
+ );
+ if (Module["setStatus"])
+ Module["setStatus"](`Downloading data... (${loaded}/${total})`);
+ } else if (!Module.dataFileDownloads) {
+ if (Module["setStatus"])
+ Module["setStatus"]("Downloading data...");
+ }
+ };
+ xhr.onerror = function (event) {
+ throw new Error("NetworkError for: " + packageName);
+ };
+ xhr.onload = function (event) {
+ if (
+ xhr.status == 200 ||
+ xhr.status == 304 ||
+ xhr.status == 206 ||
+ (xhr.status == 0 && xhr.response)
+ ) {
+ var packageData = xhr.response;
+ callback(packageData);
+ } else {
+ throw new Error(xhr.statusText + " : " + xhr.responseURL);
+ }
+ };
+ xhr.send(null);
+ }
+ function handleError(error) {
+ console.error("package error:", error);
+ }
+ var fetchedCallback = null;
+ var fetched = Module["getPreloadedPackage"]
+ ? Module["getPreloadedPackage"](
+ REMOTE_PACKAGE_NAME,
+ REMOTE_PACKAGE_SIZE,
+ )
+ : null;
+ if (!fetched)
+ fetchRemotePackage(
+ REMOTE_PACKAGE_NAME,
+ REMOTE_PACKAGE_SIZE,
+ function (data) {
+ if (fetchedCallback) {
+ fetchedCallback(data);
+ fetchedCallback = null;
+ } else {
+ fetched = data;
+ }
+ },
+ handleError,
+ );
+ function runWithFS() {
+ function assert(check, msg) {
+ if (!check) throw msg + new Error().stack;
+ }
+ Module["FS_createPath"]("/", "usd", true, true);
+ Module["FS_createPath"]("/usd", "ar", true, true);
+ Module["FS_createPath"]("/usd/ar", "resources", true, true);
+ Module["FS_createPath"]("/usd", "hd", true, true);
+ Module["FS_createPath"]("/usd/hd", "resources", true, true);
+ Module["FS_createPath"](
+ "/usd/hd/resources",
+ "codegenTemplates",
+ true,
+ true,
+ );
+ Module["FS_createPath"]("/usd", "httpResolver", true, true);
+ Module["FS_createPath"]("/usd/httpResolver", "resources", true, true);
+ Module["FS_createPath"]("/usd", "ndr", true, true);
+ Module["FS_createPath"]("/usd/ndr", "resources", true, true);
+ Module["FS_createPath"]("/usd", "sdf", true, true);
+ Module["FS_createPath"]("/usd/sdf", "resources", true, true);
+ Module["FS_createPath"]("/usd", "usd", true, true);
+ Module["FS_createPath"]("/usd/usd", "resources", true, true);
+ Module["FS_createPath"](
+ "/usd/usd/resources",
+ "codegenTemplates",
+ true,
+ true,
+ );
+ Module["FS_createPath"]("/usd/usd/resources", "usd", true, true);
+ Module["FS_createPath"]("/usd", "usdGeom", true, true);
+ Module["FS_createPath"]("/usd/usdGeom", "resources", true, true);
+ Module["FS_createPath"](
+ "/usd/usdGeom/resources",
+ "usdGeom",
+ true,
+ true,
+ );
+ Module["FS_createPath"]("/usd", "usdHydra", true, true);
+ Module["FS_createPath"]("/usd/usdHydra", "resources", true, true);
+ Module["FS_createPath"](
+ "/usd/usdHydra/resources",
+ "shaders",
+ true,
+ true,
+ );
+ Module["FS_createPath"](
+ "/usd/usdHydra/resources",
+ "usdHydra",
+ true,
+ true,
+ );
+ Module["FS_createPath"]("/usd", "usdImaging", true, true);
+ Module["FS_createPath"]("/usd/usdImaging", "resources", true, true);
+ Module["FS_createPath"]("/usd", "usdLux", true, true);
+ Module["FS_createPath"]("/usd/usdLux", "resources", true, true);
+ Module["FS_createPath"](
+ "/usd/usdLux/resources",
+ "usdLux",
+ true,
+ true,
+ );
+ Module["FS_createPath"]("/usd", "usdRender", true, true);
+ Module["FS_createPath"]("/usd/usdRender", "resources", true, true);
+ Module["FS_createPath"](
+ "/usd/usdRender/resources",
+ "usdRender",
+ true,
+ true,
+ );
+ Module["FS_createPath"]("/usd", "usdShade", true, true);
+ Module["FS_createPath"]("/usd/usdShade", "resources", true, true);
+ Module["FS_createPath"](
+ "/usd/usdShade/resources",
+ "usdShade",
+ true,
+ true,
+ );
+ Module["FS_createPath"]("/usd", "usdShaders", true, true);
+ Module["FS_createPath"]("/usd/usdShaders", "resources", true, true);
+ Module["FS_createPath"](
+ "/usd/usdShaders/resources",
+ "shaders",
+ true,
+ true,
+ );
+ Module["FS_createPath"]("/usd", "usdSkel", true, true);
+ Module["FS_createPath"]("/usd/usdSkel", "resources", true, true);
+ Module["FS_createPath"](
+ "/usd/usdSkel/resources",
+ "usdSkel",
+ true,
+ true,
+ );
+ Module["FS_createPath"]("/usd", "usdVol", true, true);
+ Module["FS_createPath"]("/usd/usdVol", "resources", true, true);
+ Module["FS_createPath"](
+ "/usd/usdVol/resources",
+ "usdVol",
+ true,
+ true,
+ );
+ /** @constructor */ function DataRequest(start, end, audio) {
+ this.start = start;
+ this.end = end;
+ this.audio = audio;
+ }
+ DataRequest.prototype = {
+ requests: {},
+ open: function (mode, name) {
+ this.name = name;
+ this.requests[name] = this;
+ Module["addRunDependency"](`fp ${this.name}`);
+ },
+ send: function () {},
+ onload: function () {
+ var byteArray = this.byteArray.subarray(this.start, this.end);
+ this.finish(byteArray);
+ },
+ finish: function (byteArray) {
+ var that = this;
+ Module["FS_createDataFile"](
+ this.name,
+ null,
+ byteArray,
+ true,
+ true,
+ true,
+ );
+ Module["removeRunDependency"](`fp ${that.name}`);
+ this.requests[this.name] = null;
+ },
+ };
+ var files = metadata["files"];
+ for (var i = 0; i < files.length; ++i) {
+ new DataRequest(
+ files[i]["start"],
+ files[i]["end"],
+ files[i]["audio"] || 0,
+ ).open("GET", files[i]["filename"]);
+ }
+ function processPackageData(arrayBuffer) {
+ assert(arrayBuffer, "Loading data file failed.");
+ assert(
+ arrayBuffer.constructor.name === ArrayBuffer.name,
+ "bad input to processPackageData",
+ );
+ var byteArray = new Uint8Array(arrayBuffer);
+ var curr;
+ DataRequest.prototype.byteArray = byteArray;
+ var files = metadata["files"];
+ for (var i = 0; i < files.length; ++i) {
+ DataRequest.prototype.requests[files[i].filename].onload();
+ }
+ Module["removeRunDependency"]("datafile_emHdBindings.data");
+ }
+ Module["addRunDependency"]("datafile_emHdBindings.data");
+ if (!Module.preloadResults) Module.preloadResults = {};
+ Module.preloadResults[PACKAGE_NAME] = {
+ fromCache: false,
+ };
+ if (fetched) {
+ processPackageData(fetched);
+ fetched = null;
+ } else {
+ fetchedCallback = processPackageData;
+ }
+ }
+ if (Module["calledRun"]) {
+ runWithFS();
+ } else {
+ if (!Module["preRun"]) Module["preRun"] = [];
+ Module["preRun"].push(runWithFS);
+ }
+ };
+ loadPackage({
+ files: [
+ {
+ filename: "/usd/ar/resources/plugInfo.json",
+ start: 0,
+ end: 589,
+ },
+ {
+ filename: "/usd/hd/resources/codegenTemplates/schemaClass.cpp",
+ start: 589,
+ end: 9620,
+ },
+ {
+ filename: "/usd/hd/resources/codegenTemplates/schemaClass.h",
+ start: 9620,
+ end: 20258,
+ },
+ {
+ filename: "/usd/hd/resources/plugInfo.json",
+ start: 20258,
+ end: 20483,
+ },
+ {
+ filename: "/usd/httpResolver/resources/plugInfo.json",
+ start: 20483,
+ end: 20823,
+ },
+ {
+ filename: "/usd/ndr/resources/plugInfo.json",
+ start: 20823,
+ end: 21397,
+ },
+ {
+ filename: "/usd/plugInfo.json",
+ start: 21397,
+ end: 21448,
+ },
+ {
+ filename: "/usd/sdf/resources/plugInfo.json",
+ start: 21448,
+ end: 22544,
+ },
+ {
+ filename: "/usd/usd/resources/codegenTemplates/api.h",
+ start: 22544,
+ end: 24646,
+ },
+ {
+ filename: "/usd/usd/resources/codegenTemplates/plugInfo.json",
+ start: 24646,
+ end: 25003,
+ },
+ {
+ filename: "/usd/usd/resources/codegenTemplates/schemaClass.cpp",
+ start: 25003,
+ end: 37342,
+ },
+ {
+ filename: "/usd/usd/resources/codegenTemplates/schemaClass.h",
+ start: 37342,
+ end: 54229,
+ },
+ {
+ filename: "/usd/usd/resources/codegenTemplates/tokens.cpp",
+ start: 54229,
+ end: 55817,
+ },
+ {
+ filename: "/usd/usd/resources/codegenTemplates/tokens.h",
+ start: 55817,
+ end: 59072,
+ },
+ {
+ filename: "/usd/usd/resources/codegenTemplates/wrapSchemaClass.cpp",
+ start: 59072,
+ end: 68578,
+ },
+ {
+ filename: "/usd/usd/resources/codegenTemplates/wrapTokens.cpp",
+ start: 68578,
+ end: 71099,
+ },
+ {
+ filename: "/usd/usd/resources/generatedSchema.usda",
+ start: 71099,
+ end: 86803,
+ },
+ {
+ filename: "/usd/usd/resources/plugInfo.json",
+ start: 86803,
+ end: 92530,
+ },
+ {
+ filename: "/usd/usd/resources/usd/schema.usda",
+ start: 92530,
+ end: 110774,
+ },
+ {
+ filename: "/usd/usdGeom/resources/generatedSchema.usda",
+ start: 110774,
+ end: 355640,
+ },
+ {
+ filename: "/usd/usdGeom/resources/plugInfo.json",
+ start: 355640,
+ end: 371761,
+ },
+ {
+ filename: "/usd/usdGeom/resources/usdGeom/schema.usda",
+ start: 371761,
+ end: 500520,
+ },
+ {
+ filename: "/usd/usdHydra/resources/generatedSchema.usda",
+ start: 500520,
+ end: 501538,
+ },
+ {
+ filename: "/usd/usdHydra/resources/plugInfo.json",
+ start: 501538,
+ end: 502826,
+ },
+ {
+ filename: "/usd/usdHydra/resources/shaders/empty.glslfx",
+ start: 502826,
+ end: 504049,
+ },
+ {
+ filename: "/usd/usdHydra/resources/shaders/shaderDefs.usda",
+ start: 504049,
+ end: 510806,
+ },
+ {
+ filename: "/usd/usdHydra/resources/usdHydra/schema.usda",
+ start: 510806,
+ end: 518107,
+ },
+ {
+ filename: "/usd/usdImaging/resources/plugInfo.json",
+ start: 518107,
+ end: 531776,
+ },
+ {
+ filename: "/usd/usdLux/resources/generatedSchema.usda",
+ start: 531776,
+ end: 610666,
+ },
+ {
+ filename: "/usd/usdLux/resources/plugInfo.json",
+ start: 610666,
+ end: 620463,
+ },
+ {
+ filename: "/usd/usdLux/resources/usdLux/schema.usda",
+ start: 620463,
+ end: 662394,
+ },
+ {
+ filename: "/usd/usdRender/resources/generatedSchema.usda",
+ start: 662394,
+ end: 685685,
+ },
+ {
+ filename: "/usd/usdRender/resources/plugInfo.json",
+ start: 685685,
+ end: 688784,
+ },
+ {
+ filename: "/usd/usdRender/resources/usdRender/schema.usda",
+ start: 688784,
+ end: 705492,
+ },
+ {
+ filename: "/usd/usdShade/resources/generatedSchema.usda",
+ start: 705492,
+ end: 721492,
+ },
+ {
+ filename: "/usd/usdShade/resources/plugInfo.json",
+ start: 721492,
+ end: 727378,
+ },
+ {
+ filename: "/usd/usdShade/resources/usdShade/schema.usda",
+ start: 727378,
+ end: 754223,
+ },
+ {
+ filename: "/usd/usdShaders/resources/plugInfo.json",
+ start: 754223,
+ end: 754902,
+ },
+ {
+ filename: "/usd/usdShaders/resources/shaders/previewSurface.glslfx",
+ start: 754902,
+ end: 768823,
+ },
+ {
+ filename: "/usd/usdShaders/resources/shaders/primvarReader.glslfx",
+ start: 768823,
+ end: 770046,
+ },
+ {
+ filename: "/usd/usdShaders/resources/shaders/shaderDefs.usda",
+ start: 770046,
+ end: 784976,
+ },
+ {
+ filename: "/usd/usdShaders/resources/shaders/transform2d.glslfx",
+ start: 784976,
+ end: 786199,
+ },
+ {
+ filename: "/usd/usdShaders/resources/shaders/uvTexture.glslfx",
+ start: 786199,
+ end: 787422,
+ },
+ {
+ filename: "/usd/usdSkel/resources/generatedSchema.usda",
+ start: 787422,
+ end: 802913,
+ },
+ {
+ filename: "/usd/usdSkel/resources/plugInfo.json",
+ start: 802913,
+ end: 805907,
+ },
+ {
+ filename: "/usd/usdSkel/resources/usdSkel/schema.usda",
+ start: 805907,
+ end: 816312,
+ },
+ {
+ filename: "/usd/usdVol/resources/generatedSchema.usda",
+ start: 816312,
+ end: 840757,
+ },
+ {
+ filename: "/usd/usdVol/resources/plugInfo.json",
+ start: 840757,
+ end: 843193,
+ },
+ {
+ filename: "/usd/usdVol/resources/usdVol/schema.usda",
+ start: 843193,
+ end: 849069,
+ },
+ ],
+ remote_package_size: 849069,
+ });
+ })();
+
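+ // On the main (non-pthread) side, pre-create the shared WebAssembly memory with a
+ // device-dependent ceiling: 1 GiB for mobile user agents, 4 GiB otherwise.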
+ if (!Module["ENVIRONMENT_IS_PTHREAD"]) {
+ function isMobileDevice() {
+ // Guard against non-browser environments (e.g. Node) where navigator is undefined.
+ if (typeof navigator === "undefined") return false;
+ return /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(
+ navigator.userAgent,
+ );
+ }
+ const MAX_MEMORY_MOBILE = 1024 * 1024 * 1024;
+ const MAX_MEMORY_DESKTOP = 4 * 1024 * 1024 * 1024;
+ const MAX_DEVICE_MEMORY = isMobileDevice()
+ ? MAX_MEMORY_MOBILE
+ : MAX_MEMORY_DESKTOP;
+ Module["wasmMemory"] = new WebAssembly.Memory({
+ initial: 16777216 / 65536,
+ maximum: MAX_DEVICE_MEMORY / 65536,
+ shared: true,
+ });
+ }
+
+ var moduleOverrides = Object.assign({}, Module);
+
+ var arguments_ = [];
+
+ var thisProgram = "./this.program";
+
+ var quit_ = (status, toThrow) => {
+ throw toThrow;
+ };
+
+ var ENVIRONMENT_IS_WEB = typeof window == "object";
+
+ var ENVIRONMENT_IS_WORKER = typeof importScripts == "function";
+
+ var ENVIRONMENT_IS_NODE =
+ typeof process == "object" &&
+ typeof process.versions == "object" &&
+ typeof process.versions.node == "string";
+
+ var ENVIRONMENT_IS_SHELL =
+ !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER;
+
+ var ENVIRONMENT_IS_PTHREAD = Module["ENVIRONMENT_IS_PTHREAD"] || false;
+
+ var scriptDirectory = "";
+
+ function locateFile(path) {
+ if (Module["locateFile"]) {
+ return Module["locateFile"](path, scriptDirectory);
+ }
+ return scriptDirectory + path;
+ }
+
+ var read_, readAsync, readBinary;
+
+ if (ENVIRONMENT_IS_NODE) {
+ var fs = require("fs");
+ var nodePath = require("path");
+ if (ENVIRONMENT_IS_WORKER) {
+ scriptDirectory = nodePath.dirname(scriptDirectory) + "/";
+ } else {
+ scriptDirectory = __dirname + "/";
+ }
+ read_ = (filename, binary) => {
+ filename = isFileURI(filename)
+ ? new URL(filename)
+ : nodePath.normalize(filename);
+ return fs.readFileSync(filename, binary ? undefined : "utf8");
+ };
+ readBinary = (filename) => {
+ var ret = read_(filename, true);
+ if (!ret.buffer) {
+ ret = new Uint8Array(ret);
+ }
+ return ret;
+ };
+ readAsync = (filename, onload, onerror, binary = true) => {
+ filename = isFileURI(filename)
+ ? new URL(filename)
+ : nodePath.normalize(filename);
+ fs.readFile(filename, binary ? undefined : "utf8", (err, data) => {
+ if (err) onerror(err);
+ else onload(binary ? data.buffer : data);
+ });
+ };
+ if (!Module["thisProgram"] && process.argv.length > 1) {
+ thisProgram = process.argv[1].replace(/\\/g, "/");
+ }
+ arguments_ = process.argv.slice(2);
+ quit_ = (status, toThrow) => {
+ process.exitCode = status;
+ throw toThrow;
+ };
+ Module["inspect"] = () => "[Emscripten Module object]";
+ let nodeWorkerThreads;
+ try {
+ nodeWorkerThreads = require("worker_threads");
+ } catch (e) {
+ console.error(
+ 'The "worker_threads" module is not supported in this node.js build - perhaps a newer version is needed?',
+ );
+ throw e;
+ }
+ global.Worker = nodeWorkerThreads.Worker;
+ } else if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) {
+ if (ENVIRONMENT_IS_WORKER) {
+ scriptDirectory = self.location.href;
+ } else if (typeof document != "undefined" && document.currentScript) {
+ scriptDirectory = document.currentScript.src;
+ }
+ if (_scriptDir) {
+ scriptDirectory = _scriptDir;
+ }
+ if (scriptDirectory.indexOf("blob:") !== 0) {
+ scriptDirectory = scriptDirectory.substr(
+ 0,
+ scriptDirectory.replace(/[?#].*/, "").lastIndexOf("/") + 1,
+ );
+ } else {
+ scriptDirectory = "";
+ }
+ if (!ENVIRONMENT_IS_NODE) {
+ read_ = (url) => {
+ var xhr = new XMLHttpRequest();
+ xhr.open("GET", url, false);
+ xhr.send(null);
+ return xhr.responseText;
+ };
+ if (ENVIRONMENT_IS_WORKER) {
+ readBinary = (url) => {
+ var xhr = new XMLHttpRequest();
+ xhr.open("GET", url, false);
+ xhr.responseType = "arraybuffer";
+ xhr.send(null);
+ return new Uint8Array(/** @type{!ArrayBuffer} */ (xhr.response));
+ };
+ }
+ readAsync = (url, onload, onerror) => {
+ var xhr = new XMLHttpRequest();
+ xhr.open("GET", url, true);
+ xhr.responseType = "arraybuffer";
+ xhr.onload = () => {
+ if (xhr.status == 200 || (xhr.status == 0 && xhr.response)) {
+ onload(xhr.response);
+ return;
+ }
+ onerror();
+ };
+ xhr.onerror = onerror;
+ xhr.send(null);
+ };
+ }
+ } else {
+ }
+
+ if (ENVIRONMENT_IS_NODE) {
+ if (typeof performance == "undefined") {
+ global.performance = require("perf_hooks").performance;
+ }
+ }
+
+ var defaultPrint = console.log.bind(console);
+
+ var defaultPrintErr = console.error.bind(console);
+
+ if (ENVIRONMENT_IS_NODE) {
+ defaultPrint = (...args) => fs.writeSync(1, args.join(" ") + "\n");
+ defaultPrintErr = (...args) => fs.writeSync(2, args.join(" ") + "\n");
+ }
+
+ var out = Module["print"] || defaultPrint;
+
+ var err = Module["printErr"] || defaultPrintErr;
+
+ Object.assign(Module, moduleOverrides);
+
+ moduleOverrides = null;
+
+ if (Module["arguments"]) arguments_ = Module["arguments"];
+
+ if (Module["thisProgram"]) thisProgram = Module["thisProgram"];
+
+ if (Module["quit"]) quit_ = Module["quit"];
+
+ var wasmBinary;
+
+ if (Module["wasmBinary"]) wasmBinary = Module["wasmBinary"];
+
+ if (typeof WebAssembly != "object") {
+ abort("no native wasm support detected");
+ }
+
+ var wasmMemory;
+
+ var wasmModule;
+
+ var ABORT = false;
+
+ var EXITSTATUS;
+
+ /** @type {function(*, string=)} */ function assert(condition, text) {
+ if (!condition) {
+ abort(text);
+ }
+ }
+
+ var HEAP,
+ /** @type {!Int8Array} */ HEAP8,
+ /** @type {!Uint8Array} */ HEAPU8,
+ /** @type {!Int16Array} */ HEAP16,
+ /** @type {!Uint16Array} */ HEAPU16,
+ /** @type {!Int32Array} */ HEAP32,
+ /** @type {!Uint32Array} */ HEAPU32,
+ /** @type {!Float32Array} */ HEAPF32,
+ /** @type {!Float64Array} */ HEAPF64;
+
+ function updateMemoryViews() {
+ var b = wasmMemory.buffer;
+ Module["HEAP8"] = HEAP8 = new Int8Array(b);
+ Module["HEAP16"] = HEAP16 = new Int16Array(b);
+ Module["HEAPU8"] = HEAPU8 = new Uint8Array(b);
+ Module["HEAPU16"] = HEAPU16 = new Uint16Array(b);
+ Module["HEAP32"] = HEAP32 = new Int32Array(b);
+ Module["HEAPU32"] = HEAPU32 = new Uint32Array(b);
+ Module["HEAPF32"] = HEAPF32 = new Float32Array(b);
+ Module["HEAPF64"] = HEAPF64 = new Float64Array(b);
+ }
+
+ var INITIAL_MEMORY = Module["INITIAL_MEMORY"] || 16777216;
+
+ assert(
+ INITIAL_MEMORY >= 5242880,
+ "INITIAL_MEMORY should be larger than STACK_SIZE, was " +
+ INITIAL_MEMORY +
+ "! (STACK_SIZE=" +
+ 5242880 +
+ ")",
+ );
+
+ if (ENVIRONMENT_IS_PTHREAD) {
+ wasmMemory = Module["wasmMemory"];
+ } else {
+ if (Module["wasmMemory"]) {
+ wasmMemory = Module["wasmMemory"];
+ } else {
+ wasmMemory = new WebAssembly.Memory({
+ initial: INITIAL_MEMORY / 65536,
+ maximum: 4294967296 / 65536,
+ shared: true,
+ });
+ if (!(wasmMemory.buffer instanceof SharedArrayBuffer)) {
+ err(
+ "requested a shared WebAssembly.Memory but the returned buffer is not a SharedArrayBuffer, indicating that while the browser has SharedArrayBuffer it does not have WebAssembly threads support - you may need to set a flag",
+ );
+ if (ENVIRONMENT_IS_NODE) {
+ err(
+ "(on node you may need: --experimental-wasm-threads --experimental-wasm-bulk-memory and/or recent version)",
+ );
+ }
+ throw Error("bad memory");
+ }
+ }
+ }
+
+ updateMemoryViews();
+
+ INITIAL_MEMORY = wasmMemory.buffer.byteLength;
+
+ var __ATPRERUN__ = [];
+
+ var __ATINIT__ = [];
+
+ var __ATEXIT__ = [];
+
+ var __ATPOSTRUN__ = [];
+
+ var runtimeInitialized = false;
+
+ function preRun() {
+ if (Module["preRun"]) {
+ if (typeof Module["preRun"] == "function")
+ Module["preRun"] = [Module["preRun"]];
+ while (Module["preRun"].length) {
+ addOnPreRun(Module["preRun"].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPRERUN__);
+ }
+
+ function initRuntime() {
+ runtimeInitialized = true;
+ if (ENVIRONMENT_IS_PTHREAD) return;
+ if (!Module["noFSInit"] && !FS.init.initialized) FS.init();
+ FS.ignorePermissions = false;
+ TTY.init();
+ callRuntimeCallbacks(__ATINIT__);
+ }
+
+ function postRun() {
+ if (ENVIRONMENT_IS_PTHREAD) return;
+ if (Module["postRun"]) {
+ if (typeof Module["postRun"] == "function")
+ Module["postRun"] = [Module["postRun"]];
+ while (Module["postRun"].length) {
+ addOnPostRun(Module["postRun"].shift());
+ }
+ }
+ callRuntimeCallbacks(__ATPOSTRUN__);
+ }
+
+ function addOnPreRun(cb) {
+ __ATPRERUN__.unshift(cb);
+ }
+
+ function addOnInit(cb) {
+ __ATINIT__.unshift(cb);
+ }
+
+ function addOnExit(cb) {}
+
+ function addOnPostRun(cb) {
+ __ATPOSTRUN__.unshift(cb);
+ }
+
+ var runDependencies = 0;
+
+ var runDependencyWatcher = null;
+
+ var dependenciesFulfilled = null;
+
+ function getUniqueRunDependency(id) {
+ return id;
+ }
+
+ function addRunDependency(id) {
+ runDependencies++;
+ if (Module["monitorRunDependencies"]) {
+ Module["monitorRunDependencies"](runDependencies);
+ }
+ }
+
+ function removeRunDependency(id) {
+ runDependencies--;
+ if (Module["monitorRunDependencies"]) {
+ Module["monitorRunDependencies"](runDependencies);
+ }
+ if (runDependencies == 0) {
+ if (runDependencyWatcher !== null) {
+ clearInterval(runDependencyWatcher);
+ runDependencyWatcher = null;
+ }
+ if (dependenciesFulfilled) {
+ var callback = dependenciesFulfilled;
+ dependenciesFulfilled = null;
+ callback();
+ }
+ }
+ }
+
+ /** @param {string|number=} what */ function abort(what) {
+ if (Module["onAbort"]) {
+ Module["onAbort"](what);
+ }
+ what = "Aborted(" + what + ")";
+ err(what);
+ // ABORT = true; // intentionally left disabled: once ABORT is set nothing else can run, and we still want to try loading other assets after a bad file
+ EXITSTATUS = 1;
+ what += ". Build with -sASSERTIONS for more info.";
+ /** @suppress {checkTypes} */ var e = new WebAssembly.RuntimeError(what);
+ readyPromiseReject(e);
+ throw e;
+ }
+
+ var dataURIPrefix = "data:application/octet-stream;base64,";
+
+ /**
+ * Indicates whether filename is a base64 data URI.
+ * @noinline
+ */ var isDataURI = (filename) => filename.startsWith(dataURIPrefix);
+
+ /**
+ * Indicates whether filename is delivered via file protocol (as opposed to http/https)
+ * @noinline
+ */ var isFileURI = (filename) => filename.startsWith("file://");
+
+ var wasmBinaryFile;
+
+ wasmBinaryFile = "emHdBindings.wasm";
+
+ if (!isDataURI(wasmBinaryFile)) {
+ wasmBinaryFile = locateFile(wasmBinaryFile);
+ }
+
+ function getBinarySync(file) {
+ if (file == wasmBinaryFile && wasmBinary) {
+ return new Uint8Array(wasmBinary);
+ }
+ if (readBinary) {
+ return readBinary(file);
+ }
+ throw "both async and sync fetching of the wasm failed";
+ }
+
+ function getBinaryPromise(binaryFile) {
+ if (!wasmBinary && (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER)) {
+ if (typeof fetch == "function" && !isFileURI(binaryFile)) {
+ return fetch(binaryFile, {
+ credentials: "same-origin",
+ })
+ .then((response) => {
+ if (!response["ok"]) {
+ throw "failed to load wasm binary file at '" + binaryFile + "'";
+ }
+ return response["arrayBuffer"]();
+ })
+ .catch(() => getBinarySync(binaryFile));
+ } else if (readAsync) {
+ return new Promise((resolve, reject) => {
+ readAsync(
+ binaryFile,
+ (response) =>
+ resolve(new Uint8Array(/** @type{!ArrayBuffer} */ (response))),
+ reject,
+ );
+ });
+ }
+ }
+ return Promise.resolve().then(() => getBinarySync(binaryFile));
+ }
+
+ function instantiateArrayBuffer(binaryFile, imports, receiver) {
+ return getBinaryPromise(binaryFile)
+ .then((binary) => WebAssembly.instantiate(binary, imports))
+ .then((instance) => instance)
+ .then(receiver, (reason) => {
+ err(`failed to asynchronously prepare wasm: ${reason}`);
+ abort(reason);
+ });
+ }
+
+ function instantiateAsync(binary, binaryFile, imports, callback) {
+ if (
+ !binary &&
+ typeof WebAssembly.instantiateStreaming == "function" &&
+ !isDataURI(binaryFile) &&
+ !isFileURI(binaryFile) &&
+ !ENVIRONMENT_IS_NODE &&
+ typeof fetch == "function"
+ ) {
+ return fetch(binaryFile, {
+ credentials: "same-origin",
+ }).then((response) => {
+ /** @suppress {checkTypes} */ var result =
+ WebAssembly.instantiateStreaming(response, imports);
+ return result.then(callback, function (reason) {
+ err(`wasm streaming compile failed: ${reason}`);
+ err("falling back to ArrayBuffer instantiation");
+ return instantiateArrayBuffer(binaryFile, imports, callback);
+ });
+ });
+ }
+ return instantiateArrayBuffer(binaryFile, imports, callback);
+ }
+
+ function createWasm() {
+ var info = {
+ env: wasmImports,
+ wasi_snapshot_preview1: wasmImports,
+ };
+ /** @param {WebAssembly.Module=} module*/ function receiveInstance(
+ instance,
+ module,
+ ) {
+ wasmExports = instance.exports;
+ wasmExports = Asyncify.instrumentWasmExports(wasmExports);
+ wasmExports = applySignatureConversions(wasmExports);
+ registerTLSInit(wasmExports["_emscripten_tls_init"]);
+ wasmTable = wasmExports["__indirect_function_table"];
+ addOnInit(wasmExports["__wasm_call_ctors"]);
+ wasmModule = module;
+ removeRunDependency("wasm-instantiate");
+ return wasmExports;
+ }
+ addRunDependency("wasm-instantiate");
+ function receiveInstantiationResult(result) {
+ receiveInstance(result["instance"], result["module"]);
+ }
+ if (Module["instantiateWasm"]) {
+ try {
+ return Module["instantiateWasm"](info, receiveInstance);
+ } catch (e) {
+ err(`Module.instantiateWasm callback failed with error: ${e}`);
+ readyPromiseReject(e);
+ }
+ }
+ instantiateAsync(
+ wasmBinary,
+ wasmBinaryFile,
+ info,
+ receiveInstantiationResult,
+ ).catch(readyPromiseReject);
+ return {};
+ }
+
+ var tempDouble;
+
+ var tempI64;
+
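+ // Asyncify import: fetch the asset at the URL given by `route`, copy the response
+ // bytes into freshly malloc'd WASM memory, and store the resulting pointer and byte
+ // length in two consecutive i32 slots at `dataPtr` (both zeroed on failure).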
+ function __asyncjs__fetch_asset(route, dataPtr) {
+ return Asyncify.handleAsync(async () => {
+ const routeString = UTF8ToString(route);
+ const absoluteUrl = new URL(routeString);
+ try {
+ const response = await fetch(absoluteUrl);
+ if (!response.ok)
+ throw new Error("Fetch failed: " + response.statusText);
+ const buffer = await response.arrayBuffer();
+ const length = buffer.byteLength;
+ const ptr = _malloc(length);
+ GROWABLE_HEAP_U8().set(new Uint8Array(buffer), ptr >>> 0);
+ Module.HEAP32[dataPtr >> 2] = ptr;
+ Module.HEAP32[(dataPtr >> 2) + 1] = length;
+ } catch (err) {
+ console.error("Error in fetch_asset: ", err);
+ Module.HEAP32[dataPtr >> 2] = 0;
+ Module.HEAP32[(dataPtr >> 2) + 1] = 0;
+ }
+ });
+ }
+
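+ // Record a loaded file path (passed as a C string pointer) on the global
+ // self.loadedFiles list, creating the list on first use.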
+ function addToLoadedFiles(path) {
+ if (typeof self !== "undefined") {
+ if (typeof self.loadedFiles === "undefined") {
+ self.loadedFiles = [];
+ }
+ // Only push when a global scope exists; pushing unconditionally would throw here.
+ self.loadedFiles.push(UTF8ToString(path));
+ } else {
+ console.log("Neither window nor self is defined");
+ }
+ }
+
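+ // Trigger a browser download of `data` as a plain-text file whose name is given by
+ // `filenamedata`, by clicking a temporary <a download> element with a data: URL.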
+ function downloadJS(data, filenamedata) {
+ const text = UTF8ToString(data);
+ const filename = UTF8ToString(filenamedata);
+ let element = document.createElement("a");
+ element.setAttribute(
+ "href",
+ "data:text/plain;charset=utf-8," + encodeURIComponent(text),
+ );
+ element.setAttribute("download", filename);
+ element.style.display = "none";
+ document.body.appendChild(element);
+ element.click();
+ document.body.removeChild(element);
+ }
+
+ /** @constructor */ function ExitStatus(status) {
+ this.name = "ExitStatus";
+ this.message = `Program terminated with exit(${status})`;
+ this.status = status;
+ }
+
+ var terminateWorker = (worker) => {
+ worker.terminate();
+ worker.onmessage = (e) => {};
+ };
+
+ var killThread = (pthread_ptr) => {
+ var worker = PThread.pthreads[pthread_ptr];
+ delete PThread.pthreads[pthread_ptr];
+ terminateWorker(worker);
+ __emscripten_thread_free_data(pthread_ptr);
+ PThread.runningWorkers.splice(PThread.runningWorkers.indexOf(worker), 1);
+ worker.pthread_ptr = 0;
+ };
+
+ var cancelThread = (pthread_ptr) => {
+ var worker = PThread.pthreads[pthread_ptr];
+ worker.postMessage({
+ cmd: "cancel",
+ });
+ };
+
+ var cleanupThread = (pthread_ptr) => {
+ var worker = PThread.pthreads[pthread_ptr];
+ assert(worker);
+ PThread.returnWorkerToPool(worker);
+ };
+
+ var zeroMemory = (address, size) => {
+ GROWABLE_HEAP_U8().fill(0, address, address + size);
+ return address;
+ };
+
+ var spawnThread = (threadParams) => {
+ var worker = PThread.getNewWorker();
+ if (!worker) {
+ return 6;
+ }
+ PThread.runningWorkers.push(worker);
+ PThread.pthreads[threadParams.pthread_ptr] = worker;
+ worker.pthread_ptr = threadParams.pthread_ptr;
+ var msg = {
+ cmd: "run",
+ start_routine: threadParams.startRoutine,
+ arg: threadParams.arg,
+ pthread_ptr: threadParams.pthread_ptr,
+ };
+ if (ENVIRONMENT_IS_NODE) {
+ worker.unref();
+ }
+ worker.postMessage(msg, threadParams.transferList);
+ return 0;
+ };
+
+ var runtimeKeepaliveCounter = 0;
+
+ var keepRuntimeAlive = () => noExitRuntime || runtimeKeepaliveCounter > 0;
+
+ var PATH = {
+ isAbs: (path) => path.charAt(0) === "/",
+ splitPath: (filename) => {
+ var splitPathRe =
+ /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/;
+ return splitPathRe.exec(filename).slice(1);
+ },
+ normalizeArray: (parts, allowAboveRoot) => {
+ var up = 0;
+ for (var i = parts.length - 1; i >= 0; i--) {
+ var last = parts[i];
+ if (last === ".") {
+ parts.splice(i, 1);
+ } else if (last === "..") {
+ parts.splice(i, 1);
+ up++;
+ } else if (up) {
+ parts.splice(i, 1);
+ up--;
+ }
+ }
+ if (allowAboveRoot) {
+ for (; up; up--) {
+ parts.unshift("..");
+ }
+ }
+ return parts;
+ },
+ normalize: (path) => {
+ var isAbsolute = PATH.isAbs(path),
+ trailingSlash = path.substr(-1) === "/";
+ path = PATH.normalizeArray(
+ path.split("/").filter((p) => !!p),
+ !isAbsolute,
+ ).join("/");
+ if (!path && !isAbsolute) {
+ path = ".";
+ }
+ if (path && trailingSlash) {
+ path += "/";
+ }
+ return (isAbsolute ? "/" : "") + path;
+ },
+ dirname: (path) => {
+ var result = PATH.splitPath(path),
+ root = result[0],
+ dir = result[1];
+ if (!root && !dir) {
+ return ".";
+ }
+ if (dir) {
+ dir = dir.substr(0, dir.length - 1);
+ }
+ return root + dir;
+ },
+ basename: (path) => {
+ if (path === "/") return "/";
+ path = PATH.normalize(path);
+ path = path.replace(/\/$/, "");
+ var lastSlash = path.lastIndexOf("/");
+ if (lastSlash === -1) return path;
+ return path.substr(lastSlash + 1);
+ },
+ join: function () {
+ var paths = Array.prototype.slice.call(arguments);
+ return PATH.normalize(paths.join("/"));
+ },
+ join2: (l, r) => PATH.normalize(l + "/" + r),
+ };
+
+ var initRandomFill = () => {
+ if (
+ typeof crypto == "object" &&
+ typeof crypto["getRandomValues"] == "function"
+ ) {
+ return (view) => (
+ view.set(crypto.getRandomValues(new Uint8Array(view.byteLength))),
+ view
+ );
+ } else if (ENVIRONMENT_IS_NODE) {
+ try {
+ var crypto_module = require("crypto");
+ var randomFillSync = crypto_module["randomFillSync"];
+ if (randomFillSync) {
+ return (view) => crypto_module["randomFillSync"](view);
+ }
+ var randomBytes = crypto_module["randomBytes"];
+ return (view) => (view.set(randomBytes(view.byteLength)), view);
+ } catch (e) {}
+ }
+ abort("initRandomDevice");
+ };
+
+ var randomFill = (view) => (randomFill = initRandomFill())(view);
+
+ var PATH_FS = {
+ resolve: function () {
+ var resolvedPath = "",
+ resolvedAbsolute = false;
+ for (var i = arguments.length - 1; i >= -1 && !resolvedAbsolute; i--) {
+ var path = i >= 0 ? arguments[i] : FS.cwd();
+ if (typeof path != "string") {
+ throw new TypeError("Arguments to path.resolve must be strings");
+ } else if (!path) {
+ return "";
+ }
+ resolvedPath = path + "/" + resolvedPath;
+ resolvedAbsolute = PATH.isAbs(path);
+ }
+ resolvedPath = PATH.normalizeArray(
+ resolvedPath.split("/").filter((p) => !!p),
+ !resolvedAbsolute,
+ ).join("/");
+ return (resolvedAbsolute ? "/" : "") + resolvedPath || ".";
+ },
+ relative: (from, to) => {
+ from = PATH_FS.resolve(from).substr(1);
+ to = PATH_FS.resolve(to).substr(1);
+ function trim(arr) {
+ var start = 0;
+ for (; start < arr.length; start++) {
+ if (arr[start] !== "") break;
+ }
+ var end = arr.length - 1;
+ for (; end >= 0; end--) {
+ if (arr[end] !== "") break;
+ }
+ if (start > end) return [];
+ return arr.slice(start, end - start + 1);
+ }
+ var fromParts = trim(from.split("/"));
+ var toParts = trim(to.split("/"));
+ var length = Math.min(fromParts.length, toParts.length);
+ var samePartsLength = length;
+ for (var i = 0; i < length; i++) {
+ if (fromParts[i] !== toParts[i]) {
+ samePartsLength = i;
+ break;
+ }
+ }
+ var outputParts = [];
+ for (var i = samePartsLength; i < fromParts.length; i++) {
+ outputParts.push("..");
+ }
+ outputParts = outputParts.concat(toParts.slice(samePartsLength));
+ return outputParts.join("/");
+ },
+ };
+
+ var UTF8Decoder =
+ typeof TextDecoder != "undefined" ? new TextDecoder("utf8") : undefined;
+
+ /**
+ * Given a pointer 'idx' to a null-terminated UTF8-encoded string in the given
+ * array that contains uint8 values, returns a copy of that string as a
+ * Javascript String object.
+ * heapOrArray is either a regular array, or a JavaScript typed array view.
+ * @param {number} idx
+ * @param {number=} maxBytesToRead
+ * @return {string}
+ */ var UTF8ArrayToString = (heapOrArray, idx, maxBytesToRead) => {
+ idx >>>= 0;
+ var endIdx = idx + maxBytesToRead;
+ var endPtr = idx;
+ while (heapOrArray[endPtr] && !(endPtr >= endIdx)) ++endPtr;
+ if (endPtr - idx > 16 && heapOrArray.buffer && UTF8Decoder) {
+ return UTF8Decoder.decode(
+ heapOrArray.buffer instanceof SharedArrayBuffer
+ ? heapOrArray.slice(idx, endPtr)
+ : heapOrArray.subarray(idx, endPtr),
+ );
+ }
+ var str = "";
+ while (idx < endPtr) {
+ var u0 = heapOrArray[idx++];
+ if (!(u0 & 128)) {
+ str += String.fromCharCode(u0);
+ continue;
+ }
+ var u1 = heapOrArray[idx++] & 63;
+ if ((u0 & 224) == 192) {
+ str += String.fromCharCode(((u0 & 31) << 6) | u1);
+ continue;
+ }
+ var u2 = heapOrArray[idx++] & 63;
+ if ((u0 & 240) == 224) {
+ u0 = ((u0 & 15) << 12) | (u1 << 6) | u2;
+ } else {
+ u0 =
+ ((u0 & 7) << 18) |
+ (u1 << 12) |
+ (u2 << 6) |
+ (heapOrArray[idx++] & 63);
+ }
+ if (u0 < 65536) {
+ str += String.fromCharCode(u0);
+ } else {
+ var ch = u0 - 65536;
+ str += String.fromCharCode(55296 | (ch >> 10), 56320 | (ch & 1023));
+ }
+ }
+ return str;
+ };
+
+ var FS_stdin_getChar_buffer = [];
+
+ var lengthBytesUTF8 = (str) => {
+ var len = 0;
+ for (var i = 0; i < str.length; ++i) {
+ var c = str.charCodeAt(i);
+ if (c <= 127) {
+ len++;
+ } else if (c <= 2047) {
+ len += 2;
+ } else if (c >= 55296 && c <= 57343) {
+ len += 4;
+ ++i;
+ } else {
+ len += 3;
+ }
+ }
+ return len;
+ };
+
+ var stringToUTF8Array = (str, heap, outIdx, maxBytesToWrite) => {
+ outIdx >>>= 0;
+ if (!(maxBytesToWrite > 0)) return 0;
+ var startIdx = outIdx;
+ var endIdx = outIdx + maxBytesToWrite - 1;
+ for (var i = 0; i < str.length; ++i) {
+ var u = str.charCodeAt(i);
+ if (u >= 55296 && u <= 57343) {
+ var u1 = str.charCodeAt(++i);
+ u = (65536 + ((u & 1023) << 10)) | (u1 & 1023);
+ }
+ if (u <= 127) {
+ if (outIdx >= endIdx) break;
+ heap[outIdx++ >>> 0] = u;
+ } else if (u <= 2047) {
+ if (outIdx + 1 >= endIdx) break;
+ heap[outIdx++ >>> 0] = 192 | (u >> 6);
+ heap[outIdx++ >>> 0] = 128 | (u & 63);
+ } else if (u <= 65535) {
+ if (outIdx + 2 >= endIdx) break;
+ heap[outIdx++ >>> 0] = 224 | (u >> 12);
+ heap[outIdx++ >>> 0] = 128 | ((u >> 6) & 63);
+ heap[outIdx++ >>> 0] = 128 | (u & 63);
+ } else {
+ if (outIdx + 3 >= endIdx) break;
+ heap[outIdx++ >>> 0] = 240 | (u >> 18);
+ heap[outIdx++ >>> 0] = 128 | ((u >> 12) & 63);
+ heap[outIdx++ >>> 0] = 128 | ((u >> 6) & 63);
+ heap[outIdx++ >>> 0] = 128 | (u & 63);
+ }
+ }
+ heap[outIdx >>> 0] = 0;
+ return outIdx - startIdx;
+ };
+
+ /** @type {function(string, boolean=, number=)} */ function intArrayFromString(
+ stringy,
+ dontAddNull,
+ length,
+ ) {
+ var len = length > 0 ? length : lengthBytesUTF8(stringy) + 1;
+ var u8array = new Array(len);
+ var numBytesWritten = stringToUTF8Array(
+ stringy,
+ u8array,
+ 0,
+ u8array.length,
+ );
+ if (dontAddNull) u8array.length = numBytesWritten;
+ return u8array;
+ }
+
+ var FS_stdin_getChar = () => {
+ if (!FS_stdin_getChar_buffer.length) {
+ var result = null;
+ if (ENVIRONMENT_IS_NODE) {
+ var BUFSIZE = 256;
+ var buf = Buffer.alloc(BUFSIZE);
+ var bytesRead = 0;
+ /** @suppress {missingProperties} */ var fd = process.stdin.fd;
+ try {
+ bytesRead = fs.readSync(fd, buf);
+ } catch (e) {
+ if (e.toString().includes("EOF")) bytesRead = 0;
+ else throw e;
+ }
+ if (bytesRead > 0) {
+ result = buf.slice(0, bytesRead).toString("utf-8");
+ } else {
+ result = null;
+ }
+ } else if (
+ typeof window != "undefined" &&
+ typeof window.prompt == "function"
+ ) {
+ result = window.prompt("Input: ");
+ if (result !== null) {
+ result += "\n";
+ }
+ } else if (typeof readline == "function") {
+ result = readline();
+ if (result !== null) {
+ result += "\n";
+ }
+ }
+ if (!result) {
+ return null;
+ }
+ FS_stdin_getChar_buffer = intArrayFromString(result, true);
+ }
+ return FS_stdin_getChar_buffer.shift();
+ };
+
+ var TTY = {
+ ttys: [],
+ init() {},
+ shutdown() {},
+ register(dev, ops) {
+ TTY.ttys[dev] = {
+ input: [],
+ output: [],
+ ops: ops,
+ };
+ FS.registerDevice(dev, TTY.stream_ops);
+ },
+ stream_ops: {
+ open(stream) {
+ var tty = TTY.ttys[stream.node.rdev];
+ if (!tty) {
+ throw new FS.ErrnoError(43);
+ }
+ stream.tty = tty;
+ stream.seekable = false;
+ },
+ close(stream) {
+ stream.tty.ops.fsync(stream.tty);
+ },
+ fsync(stream) {
+ stream.tty.ops.fsync(stream.tty);
+ },
+ read(stream, buffer, offset, length, pos) {
+ /* ignored */ if (!stream.tty || !stream.tty.ops.get_char) {
+ throw new FS.ErrnoError(60);
+ }
+ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = stream.tty.ops.get_char(stream.tty);
+ } catch (e) {
+ throw new FS.ErrnoError(29);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(6);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset + i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },
+ write(stream, buffer, offset, length, pos) {
+ if (!stream.tty || !stream.tty.ops.put_char) {
+ throw new FS.ErrnoError(60);
+ }
+ try {
+ for (var i = 0; i < length; i++) {
+ stream.tty.ops.put_char(stream.tty, buffer[offset + i]);
+ }
+ } catch (e) {
+ throw new FS.ErrnoError(29);
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ },
+ },
+ default_tty_ops: {
+ get_char(tty) {
+ return FS_stdin_getChar();
+ },
+ put_char(tty, val) {
+ if (val === null || val === 10) {
+ out(UTF8ArrayToString(tty.output, 0));
+ tty.output = [];
+ } else {
+ if (val != 0) tty.output.push(val);
+ }
+ },
+ fsync(tty) {
+ if (tty.output && tty.output.length > 0) {
+ out(UTF8ArrayToString(tty.output, 0));
+ tty.output = [];
+ }
+ },
+ ioctl_tcgets(tty) {
+ return {
+ c_iflag: 25856,
+ c_oflag: 5,
+ c_cflag: 191,
+ c_lflag: 35387,
+ c_cc: [
+ 3, 28, 127, 21, 4, 0, 1, 0, 17, 19, 26, 0, 18, 15, 23, 22, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ],
+ };
+ },
+ ioctl_tcsets(tty, optional_actions, data) {
+ return 0;
+ },
+ ioctl_tiocgwinsz(tty) {
+ return [24, 80];
+ },
+ },
+ default_tty1_ops: {
+ put_char(tty, val) {
+ if (val === null || val === 10) {
+ err(UTF8ArrayToString(tty.output, 0));
+ tty.output = [];
+ } else {
+ if (val != 0) tty.output.push(val);
+ }
+ },
+ fsync(tty) {
+ if (tty.output && tty.output.length > 0) {
+ err(UTF8ArrayToString(tty.output, 0));
+ tty.output = [];
+ }
+ },
+ },
+ };
+
+ var alignMemory = (size, alignment) =>
+ Math.ceil(size / alignment) * alignment;
+
+ var mmapAlloc = (size) => {
+ size = alignMemory(size, 65536);
+ var ptr = _emscripten_builtin_memalign(65536, size);
+ if (!ptr) return 0;
+ return zeroMemory(ptr, size);
+ };
+
+ var MEMFS = {
+ ops_table: null,
+ mount(mount) {
+ return MEMFS.createNode(null, "/", 16384 | 511, /* 0777 */ 0);
+ },
+ createNode(parent, name, mode, dev) {
+ if (FS.isBlkdev(mode) || FS.isFIFO(mode)) {
+ throw new FS.ErrnoError(63);
+ }
+ if (!MEMFS.ops_table) {
+ MEMFS.ops_table = {
+ dir: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ lookup: MEMFS.node_ops.lookup,
+ mknod: MEMFS.node_ops.mknod,
+ rename: MEMFS.node_ops.rename,
+ unlink: MEMFS.node_ops.unlink,
+ rmdir: MEMFS.node_ops.rmdir,
+ readdir: MEMFS.node_ops.readdir,
+ symlink: MEMFS.node_ops.symlink,
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek,
+ },
+ },
+ file: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ },
+ stream: {
+ llseek: MEMFS.stream_ops.llseek,
+ read: MEMFS.stream_ops.read,
+ write: MEMFS.stream_ops.write,
+ allocate: MEMFS.stream_ops.allocate,
+ mmap: MEMFS.stream_ops.mmap,
+ msync: MEMFS.stream_ops.msync,
+ },
+ },
+ link: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ readlink: MEMFS.node_ops.readlink,
+ },
+ stream: {},
+ },
+ chrdev: {
+ node: {
+ getattr: MEMFS.node_ops.getattr,
+ setattr: MEMFS.node_ops.setattr,
+ },
+ stream: FS.chrdev_stream_ops,
+ },
+ };
+ }
+ var node = FS.createNode(parent, name, mode, dev);
+ if (FS.isDir(node.mode)) {
+ node.node_ops = MEMFS.ops_table.dir.node;
+ node.stream_ops = MEMFS.ops_table.dir.stream;
+ node.contents = {};
+ } else if (FS.isFile(node.mode)) {
+ node.node_ops = MEMFS.ops_table.file.node;
+ node.stream_ops = MEMFS.ops_table.file.stream;
+ node.usedBytes = 0;
+ node.contents = null;
+ } else if (FS.isLink(node.mode)) {
+ node.node_ops = MEMFS.ops_table.link.node;
+ node.stream_ops = MEMFS.ops_table.link.stream;
+ } else if (FS.isChrdev(node.mode)) {
+ node.node_ops = MEMFS.ops_table.chrdev.node;
+ node.stream_ops = MEMFS.ops_table.chrdev.stream;
+ }
+ node.timestamp = Date.now();
+ if (parent) {
+ parent.contents[name] = node;
+ parent.timestamp = node.timestamp;
+ }
+ return node;
+ },
+ getFileDataAsTypedArray(node) {
+ if (!node.contents) return new Uint8Array(0);
+ if (node.contents.subarray)
+ return node.contents.subarray(0, node.usedBytes);
+ return new Uint8Array(node.contents);
+ },
+ expandFileStorage(node, newCapacity) {
+ var prevCapacity = node.contents ? node.contents.length : 0;
+ if (prevCapacity >= newCapacity) return;
+ var CAPACITY_DOUBLING_MAX = 1024 * 1024;
+ newCapacity = Math.max(
+ newCapacity,
+ (prevCapacity *
+ (prevCapacity < CAPACITY_DOUBLING_MAX ? 2 : 1.125)) >>>
+ 0,
+ );
+ if (prevCapacity != 0) newCapacity = Math.max(newCapacity, 256);
+ var oldContents = node.contents;
+ node.contents = new Uint8Array(newCapacity);
+ if (node.usedBytes > 0)
+ node.contents.set(oldContents.subarray(0, node.usedBytes), 0);
+ },
+ resizeFileStorage(node, newSize) {
+ if (node.usedBytes == newSize) return;
+ if (newSize == 0) {
+ node.contents = null;
+ node.usedBytes = 0;
+ } else {
+ var oldContents = node.contents;
+ node.contents = new Uint8Array(newSize);
+ if (oldContents) {
+ node.contents.set(
+ oldContents.subarray(0, Math.min(newSize, node.usedBytes)),
+ );
+ }
+ node.usedBytes = newSize;
+ }
+ },
+ node_ops: {
+ getattr(node) {
+ var attr = {};
+ attr.dev = FS.isChrdev(node.mode) ? node.id : 1;
+ attr.ino = node.id;
+ attr.mode = node.mode;
+ attr.nlink = 1;
+ attr.uid = 0;
+ attr.gid = 0;
+ attr.rdev = node.rdev;
+ if (FS.isDir(node.mode)) {
+ attr.size = 4096;
+ } else if (FS.isFile(node.mode)) {
+ attr.size = node.usedBytes;
+ } else if (FS.isLink(node.mode)) {
+ attr.size = node.link.length;
+ } else {
+ attr.size = 0;
+ }
+ attr.atime = new Date(node.timestamp);
+ attr.mtime = new Date(node.timestamp);
+ attr.ctime = new Date(node.timestamp);
+ attr.blksize = 4096;
+ attr.blocks = Math.ceil(attr.size / attr.blksize);
+ return attr;
+ },
+ setattr(node, attr) {
+ if (attr.mode !== undefined) {
+ node.mode = attr.mode;
+ }
+ if (attr.timestamp !== undefined) {
+ node.timestamp = attr.timestamp;
+ }
+ if (attr.size !== undefined) {
+ MEMFS.resizeFileStorage(node, attr.size);
+ }
+ },
+ lookup(parent, name) {
+ throw FS.genericErrors[44];
+ },
+ mknod(parent, name, mode, dev) {
+ return MEMFS.createNode(parent, name, mode, dev);
+ },
+ rename(old_node, new_dir, new_name) {
+ if (FS.isDir(old_node.mode)) {
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {}
+ if (new_node) {
+ for (var i in new_node.contents) {
+ throw new FS.ErrnoError(55);
+ }
+ }
+ }
+ delete old_node.parent.contents[old_node.name];
+ old_node.parent.timestamp = Date.now();
+ old_node.name = new_name;
+ new_dir.contents[new_name] = old_node;
+ new_dir.timestamp = old_node.parent.timestamp;
+ old_node.parent = new_dir;
+ },
+ unlink(parent, name) {
+ delete parent.contents[name];
+ parent.timestamp = Date.now();
+ },
+ rmdir(parent, name) {
+ var node = FS.lookupNode(parent, name);
+ for (var i in node.contents) {
+ throw new FS.ErrnoError(55);
+ }
+ delete parent.contents[name];
+ parent.timestamp = Date.now();
+ },
+ readdir(node) {
+ var entries = [".", ".."];
+ for (var key in node.contents) {
+ if (!node.contents.hasOwnProperty(key)) {
+ continue;
+ }
+ entries.push(key);
+ }
+ return entries;
+ },
+ symlink(parent, newname, oldpath) {
+ var node = MEMFS.createNode(
+ parent,
+ newname,
+ 511 | /* 0777 */ 40960,
+ 0,
+ );
+ node.link = oldpath;
+ return node;
+ },
+ readlink(node) {
+ if (!FS.isLink(node.mode)) {
+ throw new FS.ErrnoError(28);
+ }
+ return node.link;
+ },
+ },
+ stream_ops: {
+ read(stream, buffer, offset, length, position) {
+ var contents = stream.node.contents;
+ if (position >= stream.node.usedBytes) return 0;
+ var size = Math.min(stream.node.usedBytes - position, length);
+ if (size > 8 && contents.subarray) {
+ buffer.set(contents.subarray(position, position + size), offset);
+ } else {
+ for (var i = 0; i < size; i++)
+ buffer[offset + i] = contents[position + i];
+ }
+ return size;
+ },
+ write(stream, buffer, offset, length, position, canOwn) {
+ if (buffer.buffer === GROWABLE_HEAP_I8().buffer) {
+ canOwn = false;
+ }
+ if (!length) return 0;
+ var node = stream.node;
+ node.timestamp = Date.now();
+ if (buffer.subarray && (!node.contents || node.contents.subarray)) {
+ if (canOwn) {
+ node.contents = buffer.subarray(offset, offset + length);
+ node.usedBytes = length;
+ return length;
+ } else if (node.usedBytes === 0 && position === 0) {
+ node.contents = buffer.slice(offset, offset + length);
+ node.usedBytes = length;
+ return length;
+ } else if (position + length <= node.usedBytes) {
+ node.contents.set(
+ buffer.subarray(offset, offset + length),
+ position,
+ );
+ return length;
+ }
+ }
+ MEMFS.expandFileStorage(node, position + length);
+ if (node.contents.subarray && buffer.subarray) {
+ node.contents.set(
+ buffer.subarray(offset, offset + length),
+ position,
+ );
+ } else {
+ for (var i = 0; i < length; i++) {
+ node.contents[position + i] = buffer[offset + i];
+ }
+ }
+ node.usedBytes = Math.max(node.usedBytes, position + length);
+ return length;
+ },
+ llseek(stream, offset, whence) {
+ var position = offset;
+ if (whence === 1) {
+ position += stream.position;
+ } else if (whence === 2) {
+ if (FS.isFile(stream.node.mode)) {
+ position += stream.node.usedBytes;
+ }
+ }
+ if (position < 0) {
+ throw new FS.ErrnoError(28);
+ }
+ return position;
+ },
+ allocate(stream, offset, length) {
+ MEMFS.expandFileStorage(stream.node, offset + length);
+ stream.node.usedBytes = Math.max(
+ stream.node.usedBytes,
+ offset + length,
+ );
+ },
+ mmap(stream, length, position, prot, flags) {
+ if (!FS.isFile(stream.node.mode)) {
+ throw new FS.ErrnoError(43);
+ }
+ var ptr;
+ var allocated;
+ var contents = stream.node.contents;
+ if (!(flags & 2) && contents.buffer === GROWABLE_HEAP_I8().buffer) {
+ allocated = false;
+ ptr = contents.byteOffset;
+ } else {
+ if (position > 0 || position + length < contents.length) {
+ if (contents.subarray) {
+ contents = contents.subarray(position, position + length);
+ } else {
+ contents = Array.prototype.slice.call(
+ contents,
+ position,
+ position + length,
+ );
+ }
+ }
+ allocated = true;
+ ptr = mmapAlloc(length);
+ if (!ptr) {
+ throw new FS.ErrnoError(48);
+ }
+ GROWABLE_HEAP_I8().set(contents, ptr >>> 0);
+ }
+ return {
+ ptr: ptr,
+ allocated: allocated,
+ };
+ },
+ msync(stream, buffer, offset, length, mmapFlags) {
+ MEMFS.stream_ops.write(stream, buffer, 0, length, offset, false);
+ return 0;
+ },
+ },
+ };
+
+ /** @param {boolean=} noRunDep */ var asyncLoad = (
+ url,
+ onload,
+ onerror,
+ noRunDep,
+ ) => {
+ var dep = !noRunDep ? getUniqueRunDependency(`al ${url}`) : "";
+ readAsync(
+ url,
+ (arrayBuffer) => {
+ assert(
+ arrayBuffer,
+ `Loading data file "${url}" failed (no arrayBuffer).`,
+ );
+ onload(new Uint8Array(arrayBuffer));
+ if (dep) removeRunDependency(dep);
+ },
+ (event) => {
+ if (onerror) {
+ onerror();
+ } else {
+ throw `Loading data file "${url}" failed.`;
+ }
+ },
+ );
+ if (dep) addRunDependency(dep);
+ };
+
+ var FS_createDataFile = (
+ parent,
+ name,
+ fileData,
+ canRead,
+ canWrite,
+ canOwn,
+ ) => FS.createDataFile(parent, name, fileData, canRead, canWrite, canOwn);
+
+ var preloadPlugins = Module["preloadPlugins"] || [];
+
+ var FS_handledByPreloadPlugin = (byteArray, fullname, finish, onerror) => {
+ if (typeof Browser != "undefined") Browser.init();
+ var handled = false;
+ preloadPlugins.forEach((plugin) => {
+ if (handled) return;
+ if (plugin["canHandle"](fullname)) {
+ plugin["handle"](byteArray, fullname, finish, onerror);
+ handled = true;
+ }
+ });
+ return handled;
+ };
+
+ var FS_createPreloadedFile = (
+ parent,
+ name,
+ url,
+ canRead,
+ canWrite,
+ onload,
+ onerror,
+ dontCreateFile,
+ canOwn,
+ preFinish,
+ ) => {
+ var fullname = name ? PATH_FS.resolve(PATH.join2(parent, name)) : parent;
+ var dep = getUniqueRunDependency(`cp ${fullname}`);
+ function processData(byteArray) {
+ function finish(byteArray) {
+ if (preFinish) preFinish();
+ if (!dontCreateFile) {
+ FS_createDataFile(
+ parent,
+ name,
+ byteArray,
+ canRead,
+ canWrite,
+ canOwn,
+ );
+ }
+ if (onload) onload();
+ removeRunDependency(dep);
+ }
+ if (
+ FS_handledByPreloadPlugin(byteArray, fullname, finish, () => {
+ if (onerror) onerror();
+ removeRunDependency(dep);
+ })
+ ) {
+ return;
+ }
+ finish(byteArray);
+ }
+ addRunDependency(dep);
+ if (typeof url == "string") {
+ asyncLoad(url, (byteArray) => processData(byteArray), onerror);
+ } else {
+ processData(url);
+ }
+ };
+
+ var FS_modeStringToFlags = (str) => {
+ var flagModes = {
+ r: 0,
+ "r+": 2,
+ w: 512 | 64 | 1,
+ "w+": 512 | 64 | 2,
+ a: 1024 | 64 | 1,
+ "a+": 1024 | 64 | 2,
+ };
+ var flags = flagModes[str];
+ if (typeof flags == "undefined") {
+ throw new Error(`Unknown file open mode: ${str}`);
+ }
+ return flags;
+ };
+
+ var FS_getMode = (canRead, canWrite) => {
+ var mode = 0;
+ if (canRead) mode |= 292 | 73;
+ if (canWrite) mode |= 146;
+ return mode;
+ };
+
+ var FS = {
+ root: null,
+ mounts: [],
+ devices: {},
+ streams: [],
+ nextInode: 1,
+ nameTable: null,
+ currentPath: "/",
+ initialized: false,
+ ignorePermissions: true,
+ ErrnoError: null,
+ genericErrors: {},
+ filesystems: null,
+ syncFSRequests: 0,
+ lookupPath(path, opts = {}) {
+ path = PATH_FS.resolve(path);
+ if (!path)
+ return {
+ path: "",
+ node: null,
+ };
+ var defaults = {
+ follow_mount: true,
+ recurse_count: 0,
+ };
+ opts = Object.assign(defaults, opts);
+ if (opts.recurse_count > 8) {
+ throw new FS.ErrnoError(32);
+ }
+ var parts = path.split("/").filter((p) => !!p);
+ var current = FS.root;
+ var current_path = "/";
+ for (var i = 0; i < parts.length; i++) {
+ var islast = i === parts.length - 1;
+ if (islast && opts.parent) {
+ break;
+ }
+ current = FS.lookupNode(current, parts[i]);
+ current_path = PATH.join2(current_path, parts[i]);
+ if (FS.isMountpoint(current)) {
+ if (!islast || (islast && opts.follow_mount)) {
+ current = current.mounted.root;
+ }
+ }
+ if (!islast || opts.follow) {
+ var count = 0;
+ while (FS.isLink(current.mode)) {
+ var link = FS.readlink(current_path);
+ current_path = PATH_FS.resolve(PATH.dirname(current_path), link);
+ var lookup = FS.lookupPath(current_path, {
+ recurse_count: opts.recurse_count + 1,
+ });
+ current = lookup.node;
+ if (count++ > 40) {
+ throw new FS.ErrnoError(32);
+ }
+ }
+ }
+ }
+ return {
+ path: current_path,
+ node: current,
+ };
+ },
+ getPath(node) {
+ var path;
+ while (true) {
+ if (FS.isRoot(node)) {
+ var mount = node.mount.mountpoint;
+ if (!path) return mount;
+ return mount[mount.length - 1] !== "/"
+ ? `${mount}/${path}`
+ : mount + path;
+ }
+ path = path ? `${node.name}/${path}` : node.name;
+ node = node.parent;
+ }
+ },
+ hashName(parentid, name) {
+ var hash = 0;
+ for (var i = 0; i < name.length; i++) {
+ hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0;
+ }
+ return ((parentid + hash) >>> 0) % FS.nameTable.length;
+ },
+ hashAddNode(node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ node.name_next = FS.nameTable[hash];
+ FS.nameTable[hash] = node;
+ },
+ hashRemoveNode(node) {
+ var hash = FS.hashName(node.parent.id, node.name);
+ if (FS.nameTable[hash] === node) {
+ FS.nameTable[hash] = node.name_next;
+ } else {
+ var current = FS.nameTable[hash];
+ while (current) {
+ if (current.name_next === node) {
+ current.name_next = node.name_next;
+ break;
+ }
+ current = current.name_next;
+ }
+ }
+ },
+ lookupNode(parent, name) {
+ var errCode = FS.mayLookup(parent);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode, parent);
+ }
+ var hash = FS.hashName(parent.id, name);
+ for (var node = FS.nameTable[hash]; node; node = node.name_next) {
+ var nodeName = node.name;
+ if (node.parent.id === parent.id && nodeName === name) {
+ return node;
+ }
+ }
+ return FS.lookup(parent, name);
+ },
+ createNode(parent, name, mode, rdev) {
+ var node = new FS.FSNode(parent, name, mode, rdev);
+ FS.hashAddNode(node);
+ return node;
+ },
+ destroyNode(node) {
+ FS.hashRemoveNode(node);
+ },
+ isRoot(node) {
+ return node === node.parent;
+ },
+ isMountpoint(node) {
+ return !!node.mounted;
+ },
+ isFile(mode) {
+ return (mode & 61440) === 32768;
+ },
+ isDir(mode) {
+ return (mode & 61440) === 16384;
+ },
+ isLink(mode) {
+ return (mode & 61440) === 40960;
+ },
+ isChrdev(mode) {
+ return (mode & 61440) === 8192;
+ },
+ isBlkdev(mode) {
+ return (mode & 61440) === 24576;
+ },
+ isFIFO(mode) {
+ return (mode & 61440) === 4096;
+ },
+ isSocket(mode) {
+ return (mode & 49152) === 49152;
+ },
+ flagsToPermissionString(flag) {
+ var perms = ["r", "w", "rw"][flag & 3];
+ if (flag & 512) {
+ perms += "w";
+ }
+ return perms;
+ },
+ nodePermissions(node, perms) {
+ if (FS.ignorePermissions) {
+ return 0;
+ }
+ if (perms.includes("r") && !(node.mode & 292)) {
+ return 2;
+ } else if (perms.includes("w") && !(node.mode & 146)) {
+ return 2;
+ } else if (perms.includes("x") && !(node.mode & 73)) {
+ return 2;
+ }
+ return 0;
+ },
+ mayLookup(dir) {
+ var errCode = FS.nodePermissions(dir, "x");
+ if (errCode) return errCode;
+ if (!dir.node_ops.lookup) return 2;
+ return 0;
+ },
+ mayCreate(dir, name) {
+ try {
+ var node = FS.lookupNode(dir, name);
+ return 20;
+ } catch (e) {}
+ return FS.nodePermissions(dir, "wx");
+ },
+ mayDelete(dir, name, isdir) {
+ var node;
+ try {
+ node = FS.lookupNode(dir, name);
+ } catch (e) {
+ return e.errno;
+ }
+ var errCode = FS.nodePermissions(dir, "wx");
+ if (errCode) {
+ return errCode;
+ }
+ if (isdir) {
+ if (!FS.isDir(node.mode)) {
+ return 54;
+ }
+ if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) {
+ return 10;
+ }
+ } else {
+ if (FS.isDir(node.mode)) {
+ return 31;
+ }
+ }
+ return 0;
+ },
+ mayOpen(node, flags) {
+ if (!node) {
+ return 44;
+ }
+ if (FS.isLink(node.mode)) {
+ return 32;
+ } else if (FS.isDir(node.mode)) {
+ if (FS.flagsToPermissionString(flags) !== "r" || flags & 512) {
+ return 31;
+ }
+ }
+ return FS.nodePermissions(node, FS.flagsToPermissionString(flags));
+ },
+ MAX_OPEN_FDS: 4096,
+ nextfd() {
+ for (var fd = 0; fd <= FS.MAX_OPEN_FDS; fd++) {
+ if (!FS.streams[fd]) {
+ return fd;
+ }
+ }
+ throw new FS.ErrnoError(33);
+ },
+ getStreamChecked(fd) {
+ var stream = FS.getStream(fd);
+ if (!stream) {
+ throw new FS.ErrnoError(8);
+ }
+ return stream;
+ },
+ getStream: (fd) => FS.streams[fd],
+ createStream(stream, fd = -1) {
+ if (!FS.FSStream) {
+ FS.FSStream = /** @constructor */ function () {
+ this.shared = {};
+ };
+ FS.FSStream.prototype = {};
+ Object.defineProperties(FS.FSStream.prototype, {
+ object: {
+ /** @this {FS.FSStream} */ get() {
+ return this.node;
+ },
+ /** @this {FS.FSStream} */ set(val) {
+ this.node = val;
+ },
+ },
+ isRead: {
+ /** @this {FS.FSStream} */ get() {
+ return (this.flags & 2097155) !== 1;
+ },
+ },
+ isWrite: {
+ /** @this {FS.FSStream} */ get() {
+ return (this.flags & 2097155) !== 0;
+ },
+ },
+ isAppend: {
+ /** @this {FS.FSStream} */ get() {
+ return this.flags & 1024;
+ },
+ },
+ flags: {
+ /** @this {FS.FSStream} */ get() {
+ return this.shared.flags;
+ },
+ /** @this {FS.FSStream} */ set(val) {
+ this.shared.flags = val;
+ },
+ },
+ position: {
+ /** @this {FS.FSStream} */ get() {
+ return this.shared.position;
+ },
+ /** @this {FS.FSStream} */ set(val) {
+ this.shared.position = val;
+ },
+ },
+ });
+ }
+ stream = Object.assign(new FS.FSStream(), stream);
+ if (fd == -1) {
+ fd = FS.nextfd();
+ }
+ stream.fd = fd;
+ FS.streams[fd] = stream;
+ return stream;
+ },
+ closeStream(fd) {
+ FS.streams[fd] = null;
+ },
+ chrdev_stream_ops: {
+ open(stream) {
+ var device = FS.getDevice(stream.node.rdev);
+ stream.stream_ops = device.stream_ops;
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ },
+ llseek() {
+ throw new FS.ErrnoError(70);
+ },
+ },
+ major: (dev) => dev >> 8,
+ minor: (dev) => dev & 255,
+ makedev: (ma, mi) => (ma << 8) | mi,
+ registerDevice(dev, ops) {
+ FS.devices[dev] = {
+ stream_ops: ops,
+ };
+ },
+ getDevice: (dev) => FS.devices[dev],
+ getMounts(mount) {
+ var mounts = [];
+ var check = [mount];
+ while (check.length) {
+ var m = check.pop();
+ mounts.push(m);
+ check.push.apply(check, m.mounts);
+ }
+ return mounts;
+ },
+ syncfs(populate, callback) {
+ if (typeof populate == "function") {
+ callback = populate;
+ populate = false;
+ }
+ FS.syncFSRequests++;
+ if (FS.syncFSRequests > 1) {
+ err(
+ `warning: ${FS.syncFSRequests} FS.syncfs operations in flight at once, probably just doing extra work`,
+ );
+ }
+ var mounts = FS.getMounts(FS.root.mount);
+ var completed = 0;
+ function doCallback(errCode) {
+ FS.syncFSRequests--;
+ return callback(errCode);
+ }
+ function done(errCode) {
+ if (errCode) {
+ if (!done.errored) {
+ done.errored = true;
+ return doCallback(errCode);
+ }
+ return;
+ }
+ if (++completed >= mounts.length) {
+ doCallback(null);
+ }
+ }
+ mounts.forEach((mount) => {
+ if (!mount.type.syncfs) {
+ return done(null);
+ }
+ mount.type.syncfs(mount, populate, done);
+ });
+ },
+ mount(type, opts, mountpoint) {
+ var root = mountpoint === "/";
+ var pseudo = !mountpoint;
+ var node;
+ if (root && FS.root) {
+ throw new FS.ErrnoError(10);
+ } else if (!root && !pseudo) {
+ var lookup = FS.lookupPath(mountpoint, {
+ follow_mount: false,
+ });
+ mountpoint = lookup.path;
+ node = lookup.node;
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(10);
+ }
+ if (!FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(54);
+ }
+ }
+ var mount = {
+ type: type,
+ opts: opts,
+ mountpoint: mountpoint,
+ mounts: [],
+ };
+ var mountRoot = type.mount(mount);
+ mountRoot.mount = mount;
+ mount.root = mountRoot;
+ if (root) {
+ FS.root = mountRoot;
+ } else if (node) {
+ node.mounted = mount;
+ if (node.mount) {
+ node.mount.mounts.push(mount);
+ }
+ }
+ return mountRoot;
+ },
+ unmount(mountpoint) {
+ var lookup = FS.lookupPath(mountpoint, {
+ follow_mount: false,
+ });
+ if (!FS.isMountpoint(lookup.node)) {
+ throw new FS.ErrnoError(28);
+ }
+ var node = lookup.node;
+ var mount = node.mounted;
+ var mounts = FS.getMounts(mount);
+ Object.keys(FS.nameTable).forEach((hash) => {
+ var current = FS.nameTable[hash];
+ while (current) {
+ var next = current.name_next;
+ if (mounts.includes(current.mount)) {
+ FS.destroyNode(current);
+ }
+ current = next;
+ }
+ });
+ node.mounted = null;
+ var idx = node.mount.mounts.indexOf(mount);
+ node.mount.mounts.splice(idx, 1);
+ },
+ lookup(parent, name) {
+ return parent.node_ops.lookup(parent, name);
+ },
+ mknod(path, mode, dev) {
+ var lookup = FS.lookupPath(path, {
+ parent: true,
+ });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ if (!name || name === "." || name === "..") {
+ throw new FS.ErrnoError(28);
+ }
+ var errCode = FS.mayCreate(parent, name);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ if (!parent.node_ops.mknod) {
+ throw new FS.ErrnoError(63);
+ }
+ return parent.node_ops.mknod(parent, name, mode, dev);
+ },
+ create(path, mode) {
+ mode = mode !== undefined ? mode : 438;
+ /* 0666 */ mode &= 4095;
+ mode |= 32768;
+ return FS.mknod(path, mode, 0);
+ },
+ mkdir(path, mode) {
+ mode = mode !== undefined ? mode : 511;
+ /* 0777 */ mode &= 511 | 512;
+ mode |= 16384;
+ return FS.mknod(path, mode, 0);
+ },
+ mkdirTree(path, mode) {
+ var dirs = path.split("/");
+ var d = "";
+ for (var i = 0; i < dirs.length; ++i) {
+ if (!dirs[i]) continue;
+ d += "/" + dirs[i];
+ try {
+ FS.mkdir(d, mode);
+ } catch (e) {
+ if (e.errno != 20) throw e;
+ }
+ }
+ },
+ mkdev(path, mode, dev) {
+ if (typeof dev == "undefined") {
+ dev = mode;
+ mode = 438;
+ }
+ /* 0666 */ mode |= 8192;
+ return FS.mknod(path, mode, dev);
+ },
+ symlink(oldpath, newpath) {
+ if (!PATH_FS.resolve(oldpath)) {
+ throw new FS.ErrnoError(44);
+ }
+ var lookup = FS.lookupPath(newpath, {
+ parent: true,
+ });
+ var parent = lookup.node;
+ if (!parent) {
+ throw new FS.ErrnoError(44);
+ }
+ var newname = PATH.basename(newpath);
+ var errCode = FS.mayCreate(parent, newname);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ if (!parent.node_ops.symlink) {
+ throw new FS.ErrnoError(63);
+ }
+ return parent.node_ops.symlink(parent, newname, oldpath);
+ },
+ rename(old_path, new_path) {
+ var old_dirname = PATH.dirname(old_path);
+ var new_dirname = PATH.dirname(new_path);
+ var old_name = PATH.basename(old_path);
+ var new_name = PATH.basename(new_path);
+ var lookup, old_dir, new_dir;
+ lookup = FS.lookupPath(old_path, {
+ parent: true,
+ });
+ old_dir = lookup.node;
+ lookup = FS.lookupPath(new_path, {
+ parent: true,
+ });
+ new_dir = lookup.node;
+ if (!old_dir || !new_dir) throw new FS.ErrnoError(44);
+ if (old_dir.mount !== new_dir.mount) {
+ throw new FS.ErrnoError(75);
+ }
+ var old_node = FS.lookupNode(old_dir, old_name);
+ var relative = PATH_FS.relative(old_path, new_dirname);
+ if (relative.charAt(0) !== ".") {
+ throw new FS.ErrnoError(28);
+ }
+ relative = PATH_FS.relative(new_path, old_dirname);
+ if (relative.charAt(0) !== ".") {
+ throw new FS.ErrnoError(55);
+ }
+ var new_node;
+ try {
+ new_node = FS.lookupNode(new_dir, new_name);
+ } catch (e) {}
+ if (old_node === new_node) {
+ return;
+ }
+ var isdir = FS.isDir(old_node.mode);
+ var errCode = FS.mayDelete(old_dir, old_name, isdir);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ errCode = new_node
+ ? FS.mayDelete(new_dir, new_name, isdir)
+ : FS.mayCreate(new_dir, new_name);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ if (!old_dir.node_ops.rename) {
+ throw new FS.ErrnoError(63);
+ }
+ if (
+ FS.isMountpoint(old_node) ||
+ (new_node && FS.isMountpoint(new_node))
+ ) {
+ throw new FS.ErrnoError(10);
+ }
+ if (new_dir !== old_dir) {
+ errCode = FS.nodePermissions(old_dir, "w");
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ }
+ FS.hashRemoveNode(old_node);
+ try {
+ old_dir.node_ops.rename(old_node, new_dir, new_name);
+ } catch (e) {
+ throw e;
+ } finally {
+ FS.hashAddNode(old_node);
+ }
+ },
+ rmdir(path) {
+ var lookup = FS.lookupPath(path, {
+ parent: true,
+ });
+ var parent = lookup.node;
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var errCode = FS.mayDelete(parent, name, true);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ if (!parent.node_ops.rmdir) {
+ throw new FS.ErrnoError(63);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(10);
+ }
+ parent.node_ops.rmdir(parent, name);
+ FS.destroyNode(node);
+ },
+ readdir(path) {
+ var lookup = FS.lookupPath(path, {
+ follow: true,
+ });
+ var node = lookup.node;
+ if (!node.node_ops.readdir) {
+ throw new FS.ErrnoError(54);
+ }
+ return node.node_ops.readdir(node);
+ },
+ unlink(path) {
+ var lookup = FS.lookupPath(path, {
+ parent: true,
+ });
+ var parent = lookup.node;
+ if (!parent) {
+ throw new FS.ErrnoError(44);
+ }
+ var name = PATH.basename(path);
+ var node = FS.lookupNode(parent, name);
+ var errCode = FS.mayDelete(parent, name, false);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ if (!parent.node_ops.unlink) {
+ throw new FS.ErrnoError(63);
+ }
+ if (FS.isMountpoint(node)) {
+ throw new FS.ErrnoError(10);
+ }
+ parent.node_ops.unlink(parent, name);
+ FS.destroyNode(node);
+ },
+ readlink(path) {
+ var lookup = FS.lookupPath(path);
+ var link = lookup.node;
+ if (!link) {
+ throw new FS.ErrnoError(44);
+ }
+ if (!link.node_ops.readlink) {
+ throw new FS.ErrnoError(28);
+ }
+ return PATH_FS.resolve(
+ FS.getPath(link.parent),
+ link.node_ops.readlink(link),
+ );
+ },
+ stat(path, dontFollow) {
+ var lookup = FS.lookupPath(path, {
+ follow: !dontFollow,
+ });
+ var node = lookup.node;
+ if (!node) {
+ throw new FS.ErrnoError(44);
+ }
+ if (!node.node_ops.getattr) {
+ throw new FS.ErrnoError(63);
+ }
+ return node.node_ops.getattr(node);
+ },
+ lstat(path) {
+ return FS.stat(path, true);
+ },
+ chmod(path, mode, dontFollow) {
+ var node;
+ if (typeof path == "string") {
+ var lookup = FS.lookupPath(path, {
+ follow: !dontFollow,
+ });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(63);
+ }
+ node.node_ops.setattr(node, {
+ mode: (mode & 4095) | (node.mode & ~4095),
+ timestamp: Date.now(),
+ });
+ },
+ lchmod(path, mode) {
+ FS.chmod(path, mode, true);
+ },
+ fchmod(fd, mode) {
+ var stream = FS.getStreamChecked(fd);
+ FS.chmod(stream.node, mode);
+ },
+ chown(path, uid, gid, dontFollow) {
+ var node;
+ if (typeof path == "string") {
+ var lookup = FS.lookupPath(path, {
+ follow: !dontFollow,
+ });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(63);
+ }
+ node.node_ops.setattr(node, {
+ timestamp: Date.now(),
+ });
+ },
+ lchown(path, uid, gid) {
+ FS.chown(path, uid, gid, true);
+ },
+ fchown(fd, uid, gid) {
+ var stream = FS.getStreamChecked(fd);
+ FS.chown(stream.node, uid, gid);
+ },
+ truncate(path, len) {
+ if (len < 0) {
+ throw new FS.ErrnoError(28);
+ }
+ var node;
+ if (typeof path == "string") {
+ var lookup = FS.lookupPath(path, {
+ follow: true,
+ });
+ node = lookup.node;
+ } else {
+ node = path;
+ }
+ if (!node.node_ops.setattr) {
+ throw new FS.ErrnoError(63);
+ }
+ if (FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(31);
+ }
+ if (!FS.isFile(node.mode)) {
+ throw new FS.ErrnoError(28);
+ }
+ var errCode = FS.nodePermissions(node, "w");
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ node.node_ops.setattr(node, {
+ size: len,
+ timestamp: Date.now(),
+ });
+ },
+ ftruncate(fd, len) {
+ var stream = FS.getStreamChecked(fd);
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(28);
+ }
+ FS.truncate(stream.node, len);
+ },
+ utime(path, atime, mtime) {
+ var lookup = FS.lookupPath(path, {
+ follow: true,
+ });
+ var node = lookup.node;
+ node.node_ops.setattr(node, {
+ timestamp: Math.max(atime, mtime),
+ });
+ },
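+      // The numeric open() flags mirror musl's fcntl.h constants: 64 = O_CREAT,
+      // 128 = O_EXCL, 512 = O_TRUNC, 1024 = O_APPEND, 65536 = O_DIRECTORY,
+      // 131072 = O_NOFOLLOW; (flags & 2097155) isolates the access mode
+      // (0 = read-only, 1 = write-only, 2 = read/write).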
+ open(path, flags, mode) {
+ if (path === "") {
+ throw new FS.ErrnoError(44);
+ }
+ flags = typeof flags == "string" ? FS_modeStringToFlags(flags) : flags;
+ mode = typeof mode == "undefined" ? 438 : /* 0666 */ mode;
+ if (flags & 64) {
+ mode = (mode & 4095) | 32768;
+ } else {
+ mode = 0;
+ }
+ var node;
+ if (typeof path == "object") {
+ node = path;
+ } else {
+ path = PATH.normalize(path);
+ try {
+ var lookup = FS.lookupPath(path, {
+ follow: !(flags & 131072),
+ });
+ node = lookup.node;
+ } catch (e) {}
+ }
+ var created = false;
+ if (flags & 64) {
+ if (node) {
+ if (flags & 128) {
+ throw new FS.ErrnoError(20);
+ }
+ } else {
+ node = FS.mknod(path, mode, 0);
+ created = true;
+ }
+ }
+ if (!node) {
+ throw new FS.ErrnoError(44);
+ }
+ if (FS.isChrdev(node.mode)) {
+ flags &= ~512;
+ }
+ if (flags & 65536 && !FS.isDir(node.mode)) {
+ throw new FS.ErrnoError(54);
+ }
+ if (!created) {
+ var errCode = FS.mayOpen(node, flags);
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ }
+ if (flags & 512 && !created) {
+ FS.truncate(node, 0);
+ }
+ flags &= ~(128 | 512 | 131072);
+ var stream = FS.createStream({
+ node: node,
+ path: FS.getPath(node),
+ flags: flags,
+ seekable: true,
+ position: 0,
+ stream_ops: node.stream_ops,
+ ungotten: [],
+ error: false,
+ });
+ if (stream.stream_ops.open) {
+ stream.stream_ops.open(stream);
+ }
+ if (Module["logReadFiles"] && !(flags & 1)) {
+ if (!FS.readFiles) FS.readFiles = {};
+ if (!(path in FS.readFiles)) {
+ FS.readFiles[path] = 1;
+ }
+ }
+ return stream;
+ },
+ close(stream) {
+ if (FS.isClosed(stream)) {
+ throw new FS.ErrnoError(8);
+ }
+ if (stream.getdents) stream.getdents = null;
+ try {
+ if (stream.stream_ops.close) {
+ stream.stream_ops.close(stream);
+ }
+ } catch (e) {
+ throw e;
+ } finally {
+ FS.closeStream(stream.fd);
+ }
+ stream.fd = null;
+ },
+ isClosed(stream) {
+ return stream.fd === null;
+ },
+ llseek(stream, offset, whence) {
+ if (FS.isClosed(stream)) {
+ throw new FS.ErrnoError(8);
+ }
+ if (!stream.seekable || !stream.stream_ops.llseek) {
+ throw new FS.ErrnoError(70);
+ }
+ if (whence != 0 && whence != 1 && whence != 2) {
+ throw new FS.ErrnoError(28);
+ }
+ stream.position = stream.stream_ops.llseek(stream, offset, whence);
+ stream.ungotten = [];
+ return stream.position;
+ },
+ read(stream, buffer, offset, length, position) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(28);
+ }
+ if (FS.isClosed(stream)) {
+ throw new FS.ErrnoError(8);
+ }
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(8);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(31);
+ }
+ if (!stream.stream_ops.read) {
+ throw new FS.ErrnoError(28);
+ }
+ var seeking = typeof position != "undefined";
+ if (!seeking) {
+ position = stream.position;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(70);
+ }
+ var bytesRead = stream.stream_ops.read(
+ stream,
+ buffer,
+ offset,
+ length,
+ position,
+ );
+ if (!seeking) stream.position += bytesRead;
+ return bytesRead;
+ },
+ write(stream, buffer, offset, length, position, canOwn) {
+ if (length < 0 || position < 0) {
+ throw new FS.ErrnoError(28);
+ }
+ if (FS.isClosed(stream)) {
+ throw new FS.ErrnoError(8);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(8);
+ }
+ if (FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(31);
+ }
+ if (!stream.stream_ops.write) {
+ throw new FS.ErrnoError(28);
+ }
+ if (stream.seekable && stream.flags & 1024) {
+ FS.llseek(stream, 0, 2);
+ }
+ var seeking = typeof position != "undefined";
+ if (!seeking) {
+ position = stream.position;
+ } else if (!stream.seekable) {
+ throw new FS.ErrnoError(70);
+ }
+ var bytesWritten = stream.stream_ops.write(
+ stream,
+ buffer,
+ offset,
+ length,
+ position,
+ canOwn,
+ );
+ if (!seeking) stream.position += bytesWritten;
+ return bytesWritten;
+ },
+ allocate(stream, offset, length) {
+ if (FS.isClosed(stream)) {
+ throw new FS.ErrnoError(8);
+ }
+ if (offset < 0 || length <= 0) {
+ throw new FS.ErrnoError(28);
+ }
+ if ((stream.flags & 2097155) === 0) {
+ throw new FS.ErrnoError(8);
+ }
+ if (!FS.isFile(stream.node.mode) && !FS.isDir(stream.node.mode)) {
+ throw new FS.ErrnoError(43);
+ }
+ if (!stream.stream_ops.allocate) {
+ throw new FS.ErrnoError(138);
+ }
+ stream.stream_ops.allocate(stream, offset, length);
+ },
+ mmap(stream, length, position, prot, flags) {
+ if (
+ (prot & 2) !== 0 &&
+ (flags & 2) === 0 &&
+ (stream.flags & 2097155) !== 2
+ ) {
+ throw new FS.ErrnoError(2);
+ }
+ if ((stream.flags & 2097155) === 1) {
+ throw new FS.ErrnoError(2);
+ }
+ if (!stream.stream_ops.mmap) {
+ throw new FS.ErrnoError(43);
+ }
+ return stream.stream_ops.mmap(stream, length, position, prot, flags);
+ },
+ msync(stream, buffer, offset, length, mmapFlags) {
+ if (!stream.stream_ops.msync) {
+ return 0;
+ }
+ return stream.stream_ops.msync(
+ stream,
+ buffer,
+ offset,
+ length,
+ mmapFlags,
+ );
+ },
+ munmap: (stream) => 0,
+ ioctl(stream, cmd, arg) {
+ if (!stream.stream_ops.ioctl) {
+ throw new FS.ErrnoError(59);
+ }
+ return stream.stream_ops.ioctl(stream, cmd, arg);
+ },
+ readFile(path, opts = {}) {
+ opts.flags = opts.flags || 0;
+ opts.encoding = opts.encoding || "binary";
+ if (opts.encoding !== "utf8" && opts.encoding !== "binary") {
+ throw new Error(`Invalid encoding type "${opts.encoding}"`);
+ }
+ var ret;
+ var stream = FS.open(path, opts.flags);
+ var stat = FS.stat(path);
+ var length = stat.size;
+ var buf = new Uint8Array(length);
+ FS.read(stream, buf, 0, length, 0);
+ if (opts.encoding === "utf8") {
+ ret = UTF8ArrayToString(buf, 0);
+ } else if (opts.encoding === "binary") {
+ ret = buf;
+ }
+ FS.close(stream);
+ return ret;
+ },
+ writeFile(path, data, opts = {}) {
+ opts.flags = opts.flags || 577;
+ var stream = FS.open(path, opts.flags, opts.mode);
+ if (typeof data == "string") {
+ var buf = new Uint8Array(lengthBytesUTF8(data) + 1);
+ var actualNumBytes = stringToUTF8Array(data, buf, 0, buf.length);
+ FS.write(stream, buf, 0, actualNumBytes, undefined, opts.canOwn);
+ } else if (ArrayBuffer.isView(data)) {
+ FS.write(stream, data, 0, data.byteLength, undefined, opts.canOwn);
+ } else {
+ throw new Error("Unsupported data type");
+ }
+ FS.close(stream);
+ },
+ cwd: () => FS.currentPath,
+ chdir(path) {
+ var lookup = FS.lookupPath(path, {
+ follow: true,
+ });
+ if (lookup.node === null) {
+ throw new FS.ErrnoError(44);
+ }
+ if (!FS.isDir(lookup.node.mode)) {
+ throw new FS.ErrnoError(54);
+ }
+ var errCode = FS.nodePermissions(lookup.node, "x");
+ if (errCode) {
+ throw new FS.ErrnoError(errCode);
+ }
+ FS.currentPath = lookup.path;
+ },
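+      // Bootstrap helpers called from staticInit(): create /tmp and /home, the
+      // default /dev devices (null, tty, tty1, random, urandom, shm) and the
+      // /proc/self/fd mount used to resolve file-descriptor paths.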
+ createDefaultDirectories() {
+ FS.mkdir("/tmp");
+ FS.mkdir("/home");
+ FS.mkdir("/home/web_user");
+ },
+ createDefaultDevices() {
+ FS.mkdir("/dev");
+ FS.registerDevice(FS.makedev(1, 3), {
+ read: () => 0,
+ write: (stream, buffer, offset, length, pos) => length,
+ });
+ FS.mkdev("/dev/null", FS.makedev(1, 3));
+ TTY.register(FS.makedev(5, 0), TTY.default_tty_ops);
+ TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops);
+ FS.mkdev("/dev/tty", FS.makedev(5, 0));
+ FS.mkdev("/dev/tty1", FS.makedev(6, 0));
+ var randomBuffer = new Uint8Array(1024),
+ randomLeft = 0;
+ var randomByte = () => {
+ if (randomLeft === 0) {
+ randomLeft = randomFill(randomBuffer).byteLength;
+ }
+ return randomBuffer[--randomLeft];
+ };
+ FS.createDevice("/dev", "random", randomByte);
+ FS.createDevice("/dev", "urandom", randomByte);
+ FS.mkdir("/dev/shm");
+ FS.mkdir("/dev/shm/tmp");
+ },
+ createSpecialDirectories() {
+ FS.mkdir("/proc");
+ var proc_self = FS.mkdir("/proc/self");
+ FS.mkdir("/proc/self/fd");
+ FS.mount(
+ {
+ mount() {
+ var node = FS.createNode(
+ proc_self,
+ "fd",
+ 16384 | 511,
+ /* 0777 */ 73,
+ );
+ node.node_ops = {
+ lookup(parent, name) {
+ var fd = +name;
+ var stream = FS.getStreamChecked(fd);
+ var ret = {
+ parent: null,
+ mount: {
+ mountpoint: "fake",
+ },
+ node_ops: {
+ readlink: () => stream.path,
+ },
+ };
+ ret.parent = ret;
+ return ret;
+ },
+ };
+ return node;
+ },
+ },
+ {},
+ "/proc/self/fd",
+ );
+ },
+ createStandardStreams() {
+ if (Module["stdin"]) {
+ FS.createDevice("/dev", "stdin", Module["stdin"]);
+ } else {
+ FS.symlink("/dev/tty", "/dev/stdin");
+ }
+ if (Module["stdout"]) {
+ FS.createDevice("/dev", "stdout", null, Module["stdout"]);
+ } else {
+ FS.symlink("/dev/tty", "/dev/stdout");
+ }
+ if (Module["stderr"]) {
+ FS.createDevice("/dev", "stderr", null, Module["stderr"]);
+ } else {
+ FS.symlink("/dev/tty1", "/dev/stderr");
+ }
+ var stdin = FS.open("/dev/stdin", 0);
+ var stdout = FS.open("/dev/stdout", 1);
+ var stderr = FS.open("/dev/stderr", 1);
+ },
+ ensureErrnoError() {
+ if (FS.ErrnoError) return;
+ FS.ErrnoError = /** @this{Object} */ function ErrnoError(errno, node) {
+ this.name = "ErrnoError";
+ this.node = node;
+ this.setErrno = /** @this{Object} */ function (errno) {
+ this.errno = errno;
+ };
+ this.setErrno(errno);
+ this.message = "FS error";
+ };
+ FS.ErrnoError.prototype = new Error();
+ FS.ErrnoError.prototype.constructor = FS.ErrnoError;
+ [44].forEach((code) => {
+ FS.genericErrors[code] = new FS.ErrnoError(code);
+ FS.genericErrors[code].stack = "";
+ });
+ },
+ staticInit() {
+ FS.ensureErrnoError();
+ FS.nameTable = new Array(4096);
+ FS.mount(MEMFS, {}, "/");
+ FS.createDefaultDirectories();
+ FS.createDefaultDevices();
+ FS.createSpecialDirectories();
+ FS.filesystems = {
+ MEMFS: MEMFS,
+ };
+ },
+ init(input, output, error) {
+ FS.init.initialized = true;
+ FS.ensureErrnoError();
+ Module["stdin"] = input || Module["stdin"];
+ Module["stdout"] = output || Module["stdout"];
+ Module["stderr"] = error || Module["stderr"];
+ FS.createStandardStreams();
+ },
+ quit() {
+ FS.init.initialized = false;
+ for (var i = 0; i < FS.streams.length; i++) {
+ var stream = FS.streams[i];
+ if (!stream) {
+ continue;
+ }
+ FS.close(stream);
+ }
+ },
+ findObject(path, dontResolveLastLink) {
+ var ret = FS.analyzePath(path, dontResolveLastLink);
+ if (!ret.exists) {
+ return null;
+ }
+ return ret.object;
+ },
+ analyzePath(path, dontResolveLastLink) {
+ try {
+ var lookup = FS.lookupPath(path, {
+ follow: !dontResolveLastLink,
+ });
+ path = lookup.path;
+ } catch (e) {}
+ var ret = {
+ isRoot: false,
+ exists: false,
+ error: 0,
+ name: null,
+ path: null,
+ object: null,
+ parentExists: false,
+ parentPath: null,
+ parentObject: null,
+ };
+ try {
+ var lookup = FS.lookupPath(path, {
+ parent: true,
+ });
+ ret.parentExists = true;
+ ret.parentPath = lookup.path;
+ ret.parentObject = lookup.node;
+ ret.name = PATH.basename(path);
+ lookup = FS.lookupPath(path, {
+ follow: !dontResolveLastLink,
+ });
+ ret.exists = true;
+ ret.path = lookup.path;
+ ret.object = lookup.node;
+ ret.name = lookup.node.name;
+ ret.isRoot = lookup.path === "/";
+ } catch (e) {
+ ret.error = e.errno;
+ }
+ return ret;
+ },
+ createPath(parent, path, canRead, canWrite) {
+ parent = typeof parent == "string" ? parent : FS.getPath(parent);
+ var parts = path.split("/").reverse();
+ while (parts.length) {
+ var part = parts.pop();
+ if (!part) continue;
+ var current = PATH.join2(parent, part);
+ try {
+ FS.mkdir(current);
+ } catch (e) {}
+ parent = current;
+ }
+ return current;
+ },
+ createFile(parent, name, properties, canRead, canWrite) {
+ var path = PATH.join2(
+ typeof parent == "string" ? parent : FS.getPath(parent),
+ name,
+ );
+ var mode = FS_getMode(canRead, canWrite);
+ return FS.create(path, mode);
+ },
+ createDataFile(parent, name, data, canRead, canWrite, canOwn) {
+ var path = name;
+ if (parent) {
+ parent = typeof parent == "string" ? parent : FS.getPath(parent);
+ path = name ? PATH.join2(parent, name) : parent;
+ }
+ var mode = FS_getMode(canRead, canWrite);
+ var node = FS.create(path, mode);
+ if (data) {
+ if (typeof data == "string") {
+ var arr = new Array(data.length);
+ for (var i = 0, len = data.length; i < len; ++i)
+ arr[i] = data.charCodeAt(i);
+ data = arr;
+ }
+ FS.chmod(node, mode | 146);
+ var stream = FS.open(node, 577);
+ FS.write(stream, data, 0, data.length, 0, canOwn);
+ FS.close(stream);
+ FS.chmod(node, mode);
+ }
+ return node;
+ },
+ createDevice(parent, name, input, output) {
+ var path = PATH.join2(
+ typeof parent == "string" ? parent : FS.getPath(parent),
+ name,
+ );
+ var mode = FS_getMode(!!input, !!output);
+ if (!FS.createDevice.major) FS.createDevice.major = 64;
+ var dev = FS.makedev(FS.createDevice.major++, 0);
+ FS.registerDevice(dev, {
+ open(stream) {
+ stream.seekable = false;
+ },
+ close(stream) {
+ if (output && output.buffer && output.buffer.length) {
+ output(10);
+ }
+ },
+ read(stream, buffer, offset, length, pos) {
+ /* ignored */ var bytesRead = 0;
+ for (var i = 0; i < length; i++) {
+ var result;
+ try {
+ result = input();
+ } catch (e) {
+ throw new FS.ErrnoError(29);
+ }
+ if (result === undefined && bytesRead === 0) {
+ throw new FS.ErrnoError(6);
+ }
+ if (result === null || result === undefined) break;
+ bytesRead++;
+ buffer[offset + i] = result;
+ }
+ if (bytesRead) {
+ stream.node.timestamp = Date.now();
+ }
+ return bytesRead;
+ },
+ write(stream, buffer, offset, length, pos) {
+ for (var i = 0; i < length; i++) {
+ try {
+ output(buffer[offset + i]);
+ } catch (e) {
+ throw new FS.ErrnoError(29);
+ }
+ }
+ if (length) {
+ stream.node.timestamp = Date.now();
+ }
+ return i;
+ },
+ });
+ return FS.mkdev(path, mode, dev);
+ },
+ forceLoadFile(obj) {
+ if (obj.isDevice || obj.isFolder || obj.link || obj.contents)
+ return true;
+ if (typeof XMLHttpRequest != "undefined") {
+ throw new Error(
+ "Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread.",
+ );
+ } else if (read_) {
+ try {
+ obj.contents = intArrayFromString(read_(obj.url), true);
+ obj.usedBytes = obj.contents.length;
+ } catch (e) {
+ throw new FS.ErrnoError(29);
+ }
+ } else {
+ throw new Error("Cannot load without read() or XMLHttpRequest.");
+ }
+ },
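+      // createLazyFile registers a file whose contents are fetched on demand via
+      // synchronous XHR in 1 MB chunks using HTTP Range requests; when the server
+      // does not advertise byte serving (or the response is gzip-encoded) the
+      // whole file is downloaded on first access. Synchronous XHR is only
+      // available inside workers, hence the ENVIRONMENT_IS_WORKER check below.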
+ createLazyFile(parent, name, url, canRead, canWrite) {
+ /** @constructor */ function LazyUint8Array() {
+ this.lengthKnown = false;
+ this.chunks = [];
+ }
+ LazyUint8Array.prototype.get =
+ /** @this{Object} */ function LazyUint8Array_get(idx) {
+ if (idx > this.length - 1 || idx < 0) {
+ return undefined;
+ }
+ var chunkOffset = idx % this.chunkSize;
+ var chunkNum = (idx / this.chunkSize) | 0;
+ return this.getter(chunkNum)[chunkOffset];
+ };
+ LazyUint8Array.prototype.setDataGetter =
+ function LazyUint8Array_setDataGetter(getter) {
+ this.getter = getter;
+ };
+ LazyUint8Array.prototype.cacheLength =
+ function LazyUint8Array_cacheLength() {
+ var xhr = new XMLHttpRequest();
+ xhr.open("HEAD", url, false);
+ xhr.send(null);
+ if (
+ !((xhr.status >= 200 && xhr.status < 300) || xhr.status === 304)
+ )
+ throw new Error(
+ "Couldn't load " + url + ". Status: " + xhr.status,
+ );
+ var datalength = Number(xhr.getResponseHeader("Content-length"));
+ var header;
+ var hasByteServing =
+ (header = xhr.getResponseHeader("Accept-Ranges")) &&
+ header === "bytes";
+ var usesGzip =
+ (header = xhr.getResponseHeader("Content-Encoding")) &&
+ header === "gzip";
+ var chunkSize = 1024 * 1024;
+ if (!hasByteServing) chunkSize = datalength;
+ var doXHR = (from, to) => {
+ if (from > to)
+ throw new Error(
+ "invalid range (" +
+ from +
+ ", " +
+ to +
+ ") or no bytes requested!",
+ );
+ if (to > datalength - 1)
+ throw new Error(
+ "only " + datalength + " bytes available! programmer error!",
+ );
+ var xhr = new XMLHttpRequest();
+ xhr.open("GET", url, false);
+ if (datalength !== chunkSize)
+ xhr.setRequestHeader("Range", "bytes=" + from + "-" + to);
+ xhr.responseType = "arraybuffer";
+ if (xhr.overrideMimeType) {
+ xhr.overrideMimeType("text/plain; charset=x-user-defined");
+ }
+ xhr.send(null);
+ if (
+ !((xhr.status >= 200 && xhr.status < 300) || xhr.status === 304)
+ )
+ throw new Error(
+ "Couldn't load " + url + ". Status: " + xhr.status,
+ );
+ if (xhr.response !== undefined) {
+ return new Uint8Array(
+ /** @type{Array} */ (xhr.response || []),
+ );
+ }
+ return intArrayFromString(xhr.responseText || "", true);
+ };
+ var lazyArray = this;
+ lazyArray.setDataGetter((chunkNum) => {
+ var start = chunkNum * chunkSize;
+ var end = (chunkNum + 1) * chunkSize - 1;
+ end = Math.min(end, datalength - 1);
+ if (typeof lazyArray.chunks[chunkNum] == "undefined") {
+ lazyArray.chunks[chunkNum] = doXHR(start, end);
+ }
+ if (typeof lazyArray.chunks[chunkNum] == "undefined")
+ throw new Error("doXHR failed!");
+ return lazyArray.chunks[chunkNum];
+ });
+ if (usesGzip || !datalength) {
+ chunkSize = datalength = 1;
+ datalength = this.getter(0).length;
+ chunkSize = datalength;
+ out(
+ "LazyFiles on gzip forces download of the whole file when length is accessed",
+ );
+ }
+ this._length = datalength;
+ this._chunkSize = chunkSize;
+ this.lengthKnown = true;
+ };
+ if (typeof XMLHttpRequest != "undefined") {
+ if (!ENVIRONMENT_IS_WORKER)
+ throw "Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc";
+ var lazyArray = new LazyUint8Array();
+ Object.defineProperties(lazyArray, {
+ length: {
+ get: /** @this{Object} */ function () {
+ if (!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._length;
+ },
+ },
+ chunkSize: {
+ get: /** @this{Object} */ function () {
+ if (!this.lengthKnown) {
+ this.cacheLength();
+ }
+ return this._chunkSize;
+ },
+ },
+ });
+ var properties = {
+ isDevice: false,
+ contents: lazyArray,
+ };
+ } else {
+ var properties = {
+ isDevice: false,
+ url: url,
+ };
+ }
+ var node = FS.createFile(parent, name, properties, canRead, canWrite);
+ if (properties.contents) {
+ node.contents = properties.contents;
+ } else if (properties.url) {
+ node.contents = null;
+ node.url = properties.url;
+ }
+ Object.defineProperties(node, {
+ usedBytes: {
+ get: /** @this {FSNode} */ function () {
+ return this.contents.length;
+ },
+ },
+ });
+ var stream_ops = {};
+ var keys = Object.keys(node.stream_ops);
+ keys.forEach((key) => {
+ var fn = node.stream_ops[key];
+ stream_ops[key] = function forceLoadLazyFile() {
+ FS.forceLoadFile(node);
+ return fn.apply(null, arguments);
+ };
+ });
+ function writeChunks(stream, buffer, offset, length, position) {
+ var contents = stream.node.contents;
+ if (position >= contents.length) return 0;
+ var size = Math.min(contents.length - position, length);
+ if (contents.slice) {
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents[position + i];
+ }
+ } else {
+ for (var i = 0; i < size; i++) {
+ buffer[offset + i] = contents.get(position + i);
+ }
+ }
+ return size;
+ }
+ stream_ops.read = (stream, buffer, offset, length, position) => {
+ FS.forceLoadFile(node);
+ return writeChunks(stream, buffer, offset, length, position);
+ };
+ stream_ops.mmap = (stream, length, position, prot, flags) => {
+ FS.forceLoadFile(node);
+ var ptr = mmapAlloc(length);
+ if (!ptr) {
+ throw new FS.ErrnoError(48);
+ }
+ writeChunks(stream, GROWABLE_HEAP_I8(), ptr, length, position);
+ return {
+ ptr: ptr,
+ allocated: true,
+ };
+ };
+ node.stream_ops = stream_ops;
+ return node;
+ },
+ };
+
+ /**
+ * Given a pointer 'ptr' to a null-terminated UTF8-encoded string in the
+ * emscripten HEAP, returns a copy of that string as a Javascript String object.
+ *
+ * @param {number} ptr
+ * @param {number=} maxBytesToRead - An optional length that specifies the
+ * maximum number of bytes to read. You can omit this parameter to scan the
+ * string until the first 0 byte. If maxBytesToRead is passed, and the string
+     * at [ptr, ptr+maxBytesToRead[ contains a null byte in the middle, then the
+     * string is cut short at that byte index (i.e. maxBytesToRead will not
+     * necessarily produce a string spanning the full range [ptr, ptr+maxBytesToRead[).
+     * N.B. mixing frequent uses of UTF8ToString() with and without maxBytesToRead
+     * may throw JS JIT optimizations off, so it is worth considering consistently
+     * using one style or the other.
+ * @return {string}
+ */ var UTF8ToString = (ptr, maxBytesToRead) => {
+ ptr >>>= 0;
+ return ptr
+ ? UTF8ArrayToString(GROWABLE_HEAP_U8(), ptr, maxBytesToRead)
+ : "";
+ };
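+
+    // Illustrative sketch (not part of the bindings): how UTF8ToString is
+    // typically used from glue code. _get_error_message is a hypothetical
+    // C export used purely for the example.
+    //
+    //   var msgPtr = Module["_get_error_message"]();  // hypothetical export
+    //   var msg = UTF8ToString(msgPtr);               // scan up to the first 0 byte
+    //   var head = UTF8ToString(msgPtr, 16);          // read at most 16 bytes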
+
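+    // Shared helpers for the ___syscall_* wrappers further down: calculateAt
+    // resolves dirfd-relative ("*at") paths (-100 meaning AT_FDCWD, i.e. the
+    // current directory), doStat serializes an FS stat result into the struct
+    // stat layout at buf (64-bit fields written as low/high 32-bit words), and
+    // varargs/get step through trailing syscall arguments on the heap.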
+ var SYSCALLS = {
+ DEFAULT_POLLMASK: 5,
+ calculateAt(dirfd, path, allowEmpty) {
+ if (PATH.isAbs(path)) {
+ return path;
+ }
+ var dir;
+ if (dirfd === -100) {
+ dir = FS.cwd();
+ } else {
+ var dirstream = SYSCALLS.getStreamFromFD(dirfd);
+ dir = dirstream.path;
+ }
+ if (path.length == 0) {
+ if (!allowEmpty) {
+ throw new FS.ErrnoError(44);
+ }
+ return dir;
+ }
+ return PATH.join2(dir, path);
+ },
+ doStat(func, path, buf) {
+ try {
+ var stat = func(path);
+ } catch (e) {
+ if (
+ e &&
+ e.node &&
+ PATH.normalize(path) !== PATH.normalize(FS.getPath(e.node))
+ ) {
+ return -54;
+ }
+ throw e;
+ }
+ GROWABLE_HEAP_I32()[(buf >>> 2) >>> 0] = stat.dev;
+ GROWABLE_HEAP_I32()[((buf + 4) >>> 2) >>> 0] = stat.mode;
+ GROWABLE_HEAP_U32()[((buf + 8) >>> 2) >>> 0] = stat.nlink;
+ GROWABLE_HEAP_I32()[((buf + 12) >>> 2) >>> 0] = stat.uid;
+ GROWABLE_HEAP_I32()[((buf + 16) >>> 2) >>> 0] = stat.gid;
+ GROWABLE_HEAP_I32()[((buf + 20) >>> 2) >>> 0] = stat.rdev;
+ (tempI64 = [
+ stat.size >>> 0,
+ ((tempDouble = stat.size),
+ +Math.abs(tempDouble) >= 1
+ ? tempDouble > 0
+ ? +Math.floor(tempDouble / 4294967296) >>> 0
+ : ~~+Math.ceil(
+ (tempDouble - +(~~tempDouble >>> 0)) / 4294967296,
+ ) >>> 0
+ : 0),
+ ]),
+ (GROWABLE_HEAP_I32()[((buf + 24) >>> 2) >>> 0] = tempI64[0]),
+ (GROWABLE_HEAP_I32()[((buf + 28) >>> 2) >>> 0] = tempI64[1]);
+ GROWABLE_HEAP_I32()[((buf + 32) >>> 2) >>> 0] = 4096;
+ GROWABLE_HEAP_I32()[((buf + 36) >>> 2) >>> 0] = stat.blocks;
+ var atime = stat.atime.getTime();
+ var mtime = stat.mtime.getTime();
+ var ctime = stat.ctime.getTime();
+ (tempI64 = [
+ Math.floor(atime / 1e3) >>> 0,
+ ((tempDouble = Math.floor(atime / 1e3)),
+ +Math.abs(tempDouble) >= 1
+ ? tempDouble > 0
+ ? +Math.floor(tempDouble / 4294967296) >>> 0
+ : ~~+Math.ceil(
+ (tempDouble - +(~~tempDouble >>> 0)) / 4294967296,
+ ) >>> 0
+ : 0),
+ ]),
+ (GROWABLE_HEAP_I32()[((buf + 40) >>> 2) >>> 0] = tempI64[0]),
+ (GROWABLE_HEAP_I32()[((buf + 44) >>> 2) >>> 0] = tempI64[1]);
+ GROWABLE_HEAP_U32()[((buf + 48) >>> 2) >>> 0] = (atime % 1e3) * 1e3;
+ (tempI64 = [
+ Math.floor(mtime / 1e3) >>> 0,
+ ((tempDouble = Math.floor(mtime / 1e3)),
+ +Math.abs(tempDouble) >= 1
+ ? tempDouble > 0
+ ? +Math.floor(tempDouble / 4294967296) >>> 0
+ : ~~+Math.ceil(
+ (tempDouble - +(~~tempDouble >>> 0)) / 4294967296,
+ ) >>> 0
+ : 0),
+ ]),
+ (GROWABLE_HEAP_I32()[((buf + 56) >>> 2) >>> 0] = tempI64[0]),
+ (GROWABLE_HEAP_I32()[((buf + 60) >>> 2) >>> 0] = tempI64[1]);
+ GROWABLE_HEAP_U32()[((buf + 64) >>> 2) >>> 0] = (mtime % 1e3) * 1e3;
+ (tempI64 = [
+ Math.floor(ctime / 1e3) >>> 0,
+ ((tempDouble = Math.floor(ctime / 1e3)),
+ +Math.abs(tempDouble) >= 1
+ ? tempDouble > 0
+ ? +Math.floor(tempDouble / 4294967296) >>> 0
+ : ~~+Math.ceil(
+ (tempDouble - +(~~tempDouble >>> 0)) / 4294967296,
+ ) >>> 0
+ : 0),
+ ]),
+ (GROWABLE_HEAP_I32()[((buf + 72) >>> 2) >>> 0] = tempI64[0]),
+ (GROWABLE_HEAP_I32()[((buf + 76) >>> 2) >>> 0] = tempI64[1]);
+ GROWABLE_HEAP_U32()[((buf + 80) >>> 2) >>> 0] = (ctime % 1e3) * 1e3;
+ (tempI64 = [
+ stat.ino >>> 0,
+ ((tempDouble = stat.ino),
+ +Math.abs(tempDouble) >= 1
+ ? tempDouble > 0
+ ? +Math.floor(tempDouble / 4294967296) >>> 0
+ : ~~+Math.ceil(
+ (tempDouble - +(~~tempDouble >>> 0)) / 4294967296,
+ ) >>> 0
+ : 0),
+ ]),
+ (GROWABLE_HEAP_I32()[((buf + 88) >>> 2) >>> 0] = tempI64[0]),
+ (GROWABLE_HEAP_I32()[((buf + 92) >>> 2) >>> 0] = tempI64[1]);
+ return 0;
+ },
+ doMsync(addr, stream, len, flags, offset) {
+ if (!FS.isFile(stream.node.mode)) {
+ throw new FS.ErrnoError(43);
+ }
+ if (flags & 2) {
+ return 0;
+ }
+ var buffer = GROWABLE_HEAP_U8().slice(addr, addr + len);
+ FS.msync(stream, buffer, offset, len, flags);
+ },
+ varargs: undefined,
+ get() {
+ var ret = GROWABLE_HEAP_I32()[(+SYSCALLS.varargs >>> 2) >>> 0];
+ SYSCALLS.varargs += 4;
+ return ret;
+ },
+ getp() {
+ return SYSCALLS.get();
+ },
+ getStr(ptr) {
+ var ret = UTF8ToString(ptr);
+ return ret;
+ },
+ getStreamFromFD(fd) {
+ var stream = FS.getStreamChecked(fd);
+ return stream;
+ },
+ };
+
+ function _proc_exit(code) {
+ if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(0, 1, code);
+ EXITSTATUS = code;
+ if (!keepRuntimeAlive()) {
+ PThread.terminateAllThreads();
+ if (Module["onExit"]) Module["onExit"](code);
+ ABORT = true;
+ }
+ quit_(code, new ExitStatus(code));
+ }
+
+ /** @param {boolean|number=} implicit */ var exitJS = (
+ status,
+ implicit,
+ ) => {
+ EXITSTATUS = status;
+ if (ENVIRONMENT_IS_PTHREAD) {
+ exitOnMainThread(status);
+ throw "unwind";
+ }
+ _proc_exit(status);
+ };
+
+ var _exit = exitJS;
+
+ var handleException = (e) => {
+ if (e instanceof ExitStatus || e == "unwind") {
+ return EXITSTATUS;
+ }
+ quit_(1, e);
+ };
+
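+    // Pthread support: a pool of Web Workers (10 are pre-allocated in
+    // initMainThread) that each load the same wasm module and share wasmMemory.
+    // loadWasmModuleToWorker wires up the message protocol (spawnThread,
+    // cleanupThread, checkMailbox, ...) between workers and the main thread.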
+ var PThread = {
+ unusedWorkers: [],
+ runningWorkers: [],
+ tlsInitFunctions: [],
+ pthreads: {},
+ init() {
+ if (ENVIRONMENT_IS_PTHREAD) {
+ PThread.initWorker();
+ } else {
+ PThread.initMainThread();
+ }
+ },
+ initMainThread() {
+ var pthreadPoolSize = 10;
+ while (pthreadPoolSize--) {
+ PThread.allocateUnusedWorker();
+ }
+ addOnPreRun(() => {
+ addRunDependency("loading-workers");
+ PThread.loadWasmModuleToAllWorkers(() =>
+ removeRunDependency("loading-workers"),
+ );
+ });
+ },
+ initWorker() {
+ PThread["receiveObjectTransfer"] = PThread.receiveObjectTransfer;
+ PThread["threadInitTLS"] = PThread.threadInitTLS;
+ PThread["setExitStatus"] = PThread.setExitStatus;
+ noExitRuntime = false;
+ },
+ setExitStatus: (status) => {
+ EXITSTATUS = status;
+ },
+ terminateAllThreads__deps: ["$terminateWorker"],
+ terminateAllThreads: () => {
+ for (var worker of PThread.runningWorkers) {
+ terminateWorker(worker);
+ }
+ for (var worker of PThread.unusedWorkers) {
+ terminateWorker(worker);
+ }
+ PThread.unusedWorkers = [];
+ PThread.runningWorkers = [];
+ PThread.pthreads = [];
+ },
+ returnWorkerToPool: (worker) => {
+ var pthread_ptr = worker.pthread_ptr;
+ delete PThread.pthreads[pthread_ptr];
+ PThread.unusedWorkers.push(worker);
+ PThread.runningWorkers.splice(
+ PThread.runningWorkers.indexOf(worker),
+ 1,
+ );
+ worker.pthread_ptr = 0;
+ __emscripten_thread_free_data(pthread_ptr);
+ },
+ receiveObjectTransfer(data) {},
+ threadInitTLS() {
+ PThread.tlsInitFunctions.forEach((f) => f());
+ },
+ loadWasmModuleToWorker: (worker) =>
+ new Promise((onFinishedLoading) => {
+ worker.onmessage = (e) => {
+ var d = e["data"];
+ var cmd = d["cmd"];
+ if (d["targetThread"] && d["targetThread"] != _pthread_self()) {
+ var targetWorker = PThread.pthreads[d["targetThread"]];
+ if (targetWorker) {
+ targetWorker.postMessage(d, d["transferList"]);
+ } else {
+ err(
+ `Internal error! Worker sent a message "${cmd}" to target pthread ${d["targetThread"]}, but that thread no longer exists!`,
+ );
+ }
+ return;
+ }
+ if (cmd === "checkMailbox") {
+ checkMailbox();
+ } else if (cmd === "spawnThread") {
+ spawnThread(d);
+ } else if (cmd === "cleanupThread") {
+ cleanupThread(d["thread"]);
+ } else if (cmd === "killThread") {
+ killThread(d["thread"]);
+ } else if (cmd === "cancelThread") {
+ cancelThread(d["thread"]);
+ } else if (cmd === "loaded") {
+ worker.loaded = true;
+ if (ENVIRONMENT_IS_NODE && !worker.pthread_ptr) {
+ worker.unref();
+ }
+ onFinishedLoading(worker);
+ } else if (cmd === "alert") {
+ alert(`Thread ${d["threadId"]}: ${d["text"]}`);
+ } else if (d.target === "setimmediate") {
+ worker.postMessage(d);
+ } else if (cmd === "callHandler") {
+ Module[d["handler"]](...d["args"]);
+ } else if (cmd) {
+ err(`worker sent an unknown command ${cmd}`);
+ }
+ };
+ worker.onerror = (e) => {
+ var message = "worker sent an error!";
+ err(`${message} ${e.filename}:${e.lineno}: ${e.message}`);
+ throw e;
+ };
+ if (ENVIRONMENT_IS_NODE) {
+ worker.on("message", (data) =>
+ worker.onmessage({
+ data: data,
+ }),
+ );
+ worker.on("error", (e) => worker.onerror(e));
+ }
+ var handlers = [];
+ var knownHandlers = ["onExit", "onAbort", "print", "printErr"];
+ for (var handler of knownHandlers) {
+ if (Module.hasOwnProperty(handler)) {
+ handlers.push(handler);
+ }
+ }
+ worker.postMessage({
+ cmd: "load",
+ handlers: handlers,
+ urlOrBlob: Module["mainScriptUrlOrBlob"] || _scriptDir,
+ wasmMemory: wasmMemory,
+ wasmModule: wasmModule,
+ });
+ }),
+ loadWasmModuleToAllWorkers(onMaybeReady) {
+ if (ENVIRONMENT_IS_PTHREAD) {
+ return onMaybeReady();
+ }
+ let pthreadPoolReady = Promise.all(
+ PThread.unusedWorkers.map(PThread.loadWasmModuleToWorker),
+ );
+ pthreadPoolReady.then(onMaybeReady);
+ },
+ allocateUnusedWorker() {
+ var worker;
+ var pthreadMainJs = locateFile("emHdBindings.worker.js");
+ worker = new Worker(pthreadMainJs);
+ PThread.unusedWorkers.push(worker);
+ },
+ getNewWorker() {
+ if (PThread.unusedWorkers.length == 0) {
+ PThread.allocateUnusedWorker();
+ PThread.loadWasmModuleToWorker(PThread.unusedWorkers[0]);
+ }
+ return PThread.unusedWorkers.pop();
+ },
+ };
+
+ Module["PThread"] = PThread;
+
+ var callRuntimeCallbacks = (callbacks) => {
+ while (callbacks.length > 0) {
+ callbacks.shift()(Module);
+ }
+ };
+
+ var establishStackSpace = () => {
+ var pthread_ptr = _pthread_self();
+ var stackHigh = GROWABLE_HEAP_U32()[((pthread_ptr + 52) >>> 2) >>> 0];
+ var stackSize = GROWABLE_HEAP_U32()[((pthread_ptr + 56) >>> 2) >>> 0];
+ var stackLow = stackHigh - stackSize;
+ _emscripten_stack_set_limits(stackHigh, stackLow);
+ stackRestore(stackHigh);
+ };
+
+ Module["establishStackSpace"] = establishStackSpace;
+
+ function exitOnMainThread(returnCode) {
+ if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(1, 0, returnCode);
+ _exit(returnCode);
+ }
+
+ /**
+ * @param {number} ptr
+ * @param {string} type
+ */ function getValue(ptr, type = "i8") {
+ if (type.endsWith("*")) type = "*";
+ switch (type) {
+ case "i1":
+ return GROWABLE_HEAP_I8()[(ptr >>> 0) >>> 0];
+
+ case "i8":
+ return GROWABLE_HEAP_I8()[(ptr >>> 0) >>> 0];
+
+ case "i16":
+ return GROWABLE_HEAP_I16()[(ptr >>> 1) >>> 0];
+
+ case "i32":
+ return GROWABLE_HEAP_I32()[(ptr >>> 2) >>> 0];
+
+ case "i64":
+ abort("to do getValue(i64) use WASM_BIGINT");
+
+ case "float":
+ return GROWABLE_HEAP_F32()[(ptr >>> 2) >>> 0];
+
+ case "double":
+ return GROWABLE_HEAP_F64()[(ptr >>> 3) >>> 0];
+
+ case "*":
+ return GROWABLE_HEAP_U32()[(ptr >>> 2) >>> 0];
+
+ default:
+ abort(`invalid type for getValue: ${type}`);
+ }
+ }
+
+ var invokeEntryPoint = (ptr, arg) => {
+ var result = ((a1) => dynCall_ii.apply(null, [ptr, a1]))(arg);
+ function finish(result) {
+ if (keepRuntimeAlive()) {
+ PThread.setExitStatus(result);
+ } else {
+ __emscripten_thread_exit(result);
+ }
+ }
+ finish(result);
+ };
+
+ Module["invokeEntryPoint"] = invokeEntryPoint;
+
+ var noExitRuntime = Module["noExitRuntime"] || true;
+
+ var registerTLSInit = (tlsInitFunc) => {
+ PThread.tlsInitFunctions.push(tlsInitFunc);
+ };
+
+ /**
+ * @param {number} ptr
+ * @param {number} value
+ * @param {string} type
+ */ function setValue(ptr, value, type = "i8") {
+ if (type.endsWith("*")) type = "*";
+ switch (type) {
+ case "i1":
+ GROWABLE_HEAP_I8()[(ptr >>> 0) >>> 0] = value;
+ break;
+
+ case "i8":
+ GROWABLE_HEAP_I8()[(ptr >>> 0) >>> 0] = value;
+ break;
+
+ case "i16":
+ GROWABLE_HEAP_I16()[(ptr >>> 1) >>> 0] = value;
+ break;
+
+ case "i32":
+ GROWABLE_HEAP_I32()[(ptr >>> 2) >>> 0] = value;
+ break;
+
+ case "i64":
+ abort("to do setValue(i64) use WASM_BIGINT");
+
+ case "float":
+ GROWABLE_HEAP_F32()[(ptr >>> 2) >>> 0] = value;
+ break;
+
+ case "double":
+ GROWABLE_HEAP_F64()[(ptr >>> 3) >>> 0] = value;
+ break;
+
+ case "*":
+ GROWABLE_HEAP_U32()[(ptr >>> 2) >>> 0] = value;
+ break;
+
+ default:
+ abort(`invalid type for setValue: ${type}`);
+ }
+ }
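+
+    // Illustrative sketch (not part of the bindings): getValue/setValue peek and
+    // poke typed values in the wasm heap through a pointer. `ptr` below stands
+    // for any valid heap address, e.g. one returned by _malloc.
+    //
+    //   setValue(ptr, 42, "i32");           // write a 32-bit int
+    //   var x = getValue(ptr, "i32");       // read it back (x === 42)
+    //   var p = getValue(ptr + 8, "i8*");   // pointer types are read as "*"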
+
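+    // Reassembles a 64-bit integer passed as two 32-bit halves into a JS
+    // number, or NaN when the value cannot be represented exactly in the
+    // 53-bit safe-integer range.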
+ var convertI32PairToI53Checked = (lo, hi) =>
+ (hi + 2097152) >>> 0 < 4194305 - !!lo
+ ? (lo >>> 0) + hi * 4294967296
+ : NaN;
+
+ function ___assert_fail(condition, filename, line, func) {
+ condition >>>= 0;
+ filename >>>= 0;
+ func >>>= 0;
+ abort(
+ `Assertion failed: ${UTF8ToString(condition)}, at: ` +
+ [
+ filename ? UTF8ToString(filename) : "unknown filename",
+ line,
+ func ? UTF8ToString(func) : "unknown function",
+ ],
+ );
+ }
+
+ var ___call_sighandler = function (fp, sig) {
+ fp >>>= 0;
+ return ((a1) => dynCall_vi.apply(null, [fp, a1]))(sig);
+ };
+
+ var exceptionCaught = [];
+
+ var uncaughtExceptionCount = 0;
+
+ function ___cxa_begin_catch(ptr) {
+ ptr >>>= 0;
+ var info = new ExceptionInfo(ptr);
+ if (!info.get_caught()) {
+ info.set_caught(true);
+ uncaughtExceptionCount--;
+ }
+ info.set_rethrown(false);
+ exceptionCaught.push(info);
+ ___cxa_increment_exception_refcount(info.excPtr);
+ return info.get_exception_ptr();
+ }
+
+ var exceptionLast = 0;
+
+ var ___cxa_end_catch = () => {
+ _setThrew(0, 0);
+ var info = exceptionCaught.pop();
+ ___cxa_decrement_exception_refcount(info.excPtr);
+ exceptionLast = 0;
+ };
+
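+    // View over the metadata block that precedes a thrown C++ exception object
+    // in the heap (ptr = excPtr - 24): type at +4, destructor at +8,
+    // caught/rethrown flags at +12/+13, and the adjusted pointer (+16) used by
+    // the catch-matching helpers below.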
+ /** @constructor */ function ExceptionInfo(excPtr) {
+ this.excPtr = excPtr;
+ this.ptr = excPtr - 24;
+ this.set_type = function (type) {
+ GROWABLE_HEAP_U32()[((this.ptr + 4) >>> 2) >>> 0] = type;
+ };
+ this.get_type = function () {
+ return GROWABLE_HEAP_U32()[((this.ptr + 4) >>> 2) >>> 0];
+ };
+ this.set_destructor = function (destructor) {
+ GROWABLE_HEAP_U32()[((this.ptr + 8) >>> 2) >>> 0] = destructor;
+ };
+ this.get_destructor = function () {
+ return GROWABLE_HEAP_U32()[((this.ptr + 8) >>> 2) >>> 0];
+ };
+ this.set_caught = function (caught) {
+ caught = caught ? 1 : 0;
+ GROWABLE_HEAP_I8()[((this.ptr + 12) >>> 0) >>> 0] = caught;
+ };
+ this.get_caught = function () {
+ return GROWABLE_HEAP_I8()[((this.ptr + 12) >>> 0) >>> 0] != 0;
+ };
+ this.set_rethrown = function (rethrown) {
+ rethrown = rethrown ? 1 : 0;
+ GROWABLE_HEAP_I8()[((this.ptr + 13) >>> 0) >>> 0] = rethrown;
+ };
+ this.get_rethrown = function () {
+ return GROWABLE_HEAP_I8()[((this.ptr + 13) >>> 0) >>> 0] != 0;
+ };
+ this.init = function (type, destructor) {
+ this.set_adjusted_ptr(0);
+ this.set_type(type);
+ this.set_destructor(destructor);
+ };
+ this.set_adjusted_ptr = function (adjustedPtr) {
+ GROWABLE_HEAP_U32()[((this.ptr + 16) >>> 2) >>> 0] = adjustedPtr;
+ };
+ this.get_adjusted_ptr = function () {
+ return GROWABLE_HEAP_U32()[((this.ptr + 16) >>> 2) >>> 0];
+ };
+ this.get_exception_ptr = function () {
+ var isPointer = ___cxa_is_pointer_type(this.get_type());
+ if (isPointer) {
+ return GROWABLE_HEAP_U32()[(this.excPtr >>> 2) >>> 0];
+ }
+ var adjusted = this.get_adjusted_ptr();
+ if (adjusted !== 0) return adjusted;
+ return this.excPtr;
+ };
+ }
+
+ function ___resumeException(ptr) {
+ ptr >>>= 0;
+ if (!exceptionLast) {
+ exceptionLast = ptr;
+ }
+ throw exceptionLast;
+ }
+
+ var findMatchingCatch = (args) => {
+ var thrown = exceptionLast;
+ if (!thrown) {
+ setTempRet0(0);
+ return 0;
+ }
+ var info = new ExceptionInfo(thrown);
+ info.set_adjusted_ptr(thrown);
+ var thrownType = info.get_type();
+ if (!thrownType) {
+ setTempRet0(0);
+ return thrown;
+ }
+ for (var arg in args) {
+ var caughtType = args[arg];
+ if (caughtType === 0 || caughtType === thrownType) {
+ break;
+ }
+ var adjusted_ptr_addr = info.ptr + 16;
+ if (___cxa_can_catch(caughtType, thrownType, adjusted_ptr_addr)) {
+ setTempRet0(caughtType);
+ return thrown;
+ }
+ }
+ setTempRet0(thrownType);
+ return thrown;
+ };
+
+ function ___cxa_find_matching_catch_2() {
+ return findMatchingCatch([]);
+ }
+
+ function ___cxa_find_matching_catch_3(arg0) {
+ arg0 >>>= 0;
+ return findMatchingCatch([arg0]);
+ }
+
+ function ___cxa_find_matching_catch_4(arg0, arg1) {
+ arg0 >>>= 0;
+ arg1 >>>= 0;
+ return findMatchingCatch([arg0, arg1]);
+ }
+
+ function ___cxa_get_exception_ptr(ptr) {
+ ptr >>>= 0;
+ var rtn = new ExceptionInfo(ptr).get_exception_ptr();
+ return rtn;
+ }
+
+ var ___cxa_rethrow = () => {
+ var info = exceptionCaught.pop();
+ if (!info) {
+ abort("no exception to throw");
+ }
+ var ptr = info.excPtr;
+ if (!info.get_rethrown()) {
+ exceptionCaught.push(info);
+ info.set_rethrown(true);
+ info.set_caught(false);
+ uncaughtExceptionCount++;
+ }
+ exceptionLast = ptr;
+ throw exceptionLast;
+ };
+
+ function ___cxa_rethrow_primary_exception(ptr) {
+ ptr >>>= 0;
+ if (!ptr) return;
+ var info = new ExceptionInfo(ptr);
+ exceptionCaught.push(info);
+ info.set_rethrown(true);
+ ___cxa_rethrow();
+ }
+
+ function ___cxa_throw(ptr, type, destructor) {
+ ptr >>>= 0;
+ type >>>= 0;
+ destructor >>>= 0;
+ var info = new ExceptionInfo(ptr);
+ info.init(type, destructor);
+ exceptionLast = ptr;
+ uncaughtExceptionCount++;
+ throw exceptionLast;
+ }
+
+ var ___cxa_uncaught_exceptions = () => uncaughtExceptionCount;
+
+ function ___emscripten_init_main_thread_js(tb) {
+ tb >>>= 0;
+ __emscripten_thread_init(
+ tb,
+ /*is_main=*/ !ENVIRONMENT_IS_WORKER,
+ /*is_runtime=*/ 1,
+ /*can_block=*/ !ENVIRONMENT_IS_WEB,
+ /*default_stacksize=*/ 2097152,
+ /*start_profiling=*/ false,
+ );
+ PThread.threadInitTLS();
+ }
+
+ function ___emscripten_thread_cleanup(thread) {
+ thread >>>= 0;
+ if (!ENVIRONMENT_IS_PTHREAD) cleanupThread(thread);
+ else
+ postMessage({
+ cmd: "cleanupThread",
+ thread: thread,
+ });
+ }
+
+ function pthreadCreateProxied(pthread_ptr, attr, startRoutine, arg) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(2, 1, pthread_ptr, attr, startRoutine, arg);
+ return ___pthread_create_js(pthread_ptr, attr, startRoutine, arg);
+ }
+
+ function ___pthread_create_js(pthread_ptr, attr, startRoutine, arg) {
+ pthread_ptr >>>= 0;
+ attr >>>= 0;
+ startRoutine >>>= 0;
+ arg >>>= 0;
+ if (typeof SharedArrayBuffer == "undefined") {
+ err(
+ "Current environment does not support SharedArrayBuffer, pthreads are not available!",
+ );
+ return 6;
+ }
+ var transferList = [];
+ var error = 0;
+ if (ENVIRONMENT_IS_PTHREAD && (transferList.length === 0 || error)) {
+ return pthreadCreateProxied(pthread_ptr, attr, startRoutine, arg);
+ }
+ if (error) return error;
+ var threadParams = {
+ startRoutine: startRoutine,
+ pthread_ptr: pthread_ptr,
+ arg: arg,
+ transferList: transferList,
+ };
+ if (ENVIRONMENT_IS_PTHREAD) {
+ threadParams.cmd = "spawnThread";
+ postMessage(threadParams, transferList);
+ return 0;
+ }
+ return spawnThread(threadParams);
+ }
+
+ function ___syscall_chmod(path, mode) {
+ if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(3, 1, path, mode);
+ path >>>= 0;
+ try {
+ path = SYSCALLS.getStr(path);
+ FS.chmod(path, mode);
+ return 0;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_faccessat(dirfd, path, amode, flags) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(4, 1, dirfd, path, amode, flags);
+ path >>>= 0;
+ try {
+ path = SYSCALLS.getStr(path);
+ path = SYSCALLS.calculateAt(dirfd, path);
+ if (amode & ~7) {
+ return -28;
+ }
+ var lookup = FS.lookupPath(path, {
+ follow: true,
+ });
+ var node = lookup.node;
+ if (!node) {
+ return -44;
+ }
+ var perms = "";
+ if (amode & 4) perms += "r";
+ if (amode & 2) perms += "w";
+ if (amode & 1) perms += "x";
+ if (
+ perms &&
+ /* otherwise, they've just passed F_OK */ FS.nodePermissions(
+ node,
+ perms,
+ )
+ ) {
+ return -2;
+ }
+ return 0;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_fadvise64(fd, offset, len, advice) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(5, 0, fd, offset, len, advice);
+ return 0;
+ }
+
+ function ___syscall_fchmod(fd, mode) {
+ if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(6, 1, fd, mode);
+ try {
+ FS.fchmod(fd, mode);
+ return 0;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ var setErrNo = (value) => {
+ GROWABLE_HEAP_I32()[(___errno_location() >>> 2) >>> 0] = value;
+ return value;
+ };
+
+ function ___syscall_fcntl64(fd, cmd, varargs) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(7, 1, fd, cmd, varargs);
+ varargs >>>= 0;
+ SYSCALLS.varargs = varargs;
+ try {
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ switch (cmd) {
+ case 0: {
+ var arg = SYSCALLS.get();
+ if (arg < 0) {
+ return -28;
+ }
+ while (FS.streams[arg]) {
+ arg++;
+ }
+ var newStream;
+ newStream = FS.createStream(stream, arg);
+ return newStream.fd;
+ }
+
+ case 1:
+ case 2:
+ return 0;
+
+ case 3:
+ return stream.flags;
+
+ case 4: {
+ var arg = SYSCALLS.get();
+ stream.flags |= arg;
+ return 0;
+ }
+
+ case 5: {
+ var arg = SYSCALLS.getp();
+ var offset = 0;
+ GROWABLE_HEAP_I16()[((arg + offset) >>> 1) >>> 0] = 2;
+ return 0;
+ }
+
+ case 6:
+ case 7:
+ return 0;
+
+ case 16:
+ case 8:
+ return -28;
+
+ case 9:
+ setErrNo(28);
+ return -1;
+
+ default: {
+ return -28;
+ }
+ }
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_fstat64(fd, buf) {
+ if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(8, 1, fd, buf);
+ buf >>>= 0;
+ try {
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ return SYSCALLS.doStat(FS.stat, stream.path, buf);
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ var stringToUTF8 = (str, outPtr, maxBytesToWrite) =>
+ stringToUTF8Array(str, GROWABLE_HEAP_U8(), outPtr, maxBytesToWrite);
+
+ function ___syscall_getcwd(buf, size) {
+ if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(9, 1, buf, size);
+ buf >>>= 0;
+ size >>>= 0;
+ try {
+ if (size === 0) return -28;
+ var cwd = FS.cwd();
+ var cwdLengthInBytes = lengthBytesUTF8(cwd) + 1;
+ if (size < cwdLengthInBytes) return -68;
+ stringToUTF8(cwd, buf, size);
+ return cwdLengthInBytes;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_getdents64(fd, dirp, count) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(10, 1, fd, dirp, count);
+ dirp >>>= 0;
+ count >>>= 0;
+ try {
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ if (!stream.getdents) {
+ stream.getdents = FS.readdir(stream.path);
+ }
+ var struct_size = 280;
+ var pos = 0;
+ var off = FS.llseek(stream, 0, 1);
+ var idx = Math.floor(off / struct_size);
+ while (idx < stream.getdents.length && pos + struct_size <= count) {
+ var id;
+ var type;
+ var name = stream.getdents[idx];
+ if (name === ".") {
+ id = stream.node.id;
+ type = 4;
+ } else if (name === "..") {
+ var lookup = FS.lookupPath(stream.path, {
+ parent: true,
+ });
+ id = lookup.node.id;
+ type = 4;
+ } else {
+ var child = FS.lookupNode(stream.node, name);
+ id = child.id;
+ type = FS.isChrdev(child.mode)
+ ? 2
+ : FS.isDir(child.mode)
+ ? 4
+ : FS.isLink(child.mode)
+ ? 10
+ : 8;
+ }
+ (tempI64 = [
+ id >>> 0,
+ ((tempDouble = id),
+ +Math.abs(tempDouble) >= 1
+ ? tempDouble > 0
+ ? +Math.floor(tempDouble / 4294967296) >>> 0
+ : ~~+Math.ceil(
+ (tempDouble - +(~~tempDouble >>> 0)) / 4294967296,
+ ) >>> 0
+ : 0),
+ ]),
+ (GROWABLE_HEAP_I32()[((dirp + pos) >>> 2) >>> 0] = tempI64[0]),
+ (GROWABLE_HEAP_I32()[((dirp + pos + 4) >>> 2) >>> 0] = tempI64[1]);
+ (tempI64 = [
+ ((idx + 1) * struct_size) >>> 0,
+ ((tempDouble = (idx + 1) * struct_size),
+ +Math.abs(tempDouble) >= 1
+ ? tempDouble > 0
+ ? +Math.floor(tempDouble / 4294967296) >>> 0
+ : ~~+Math.ceil(
+ (tempDouble - +(~~tempDouble >>> 0)) / 4294967296,
+ ) >>> 0
+ : 0),
+ ]),
+ (GROWABLE_HEAP_I32()[((dirp + pos + 8) >>> 2) >>> 0] = tempI64[0]),
+ (GROWABLE_HEAP_I32()[((dirp + pos + 12) >>> 2) >>> 0] = tempI64[1]);
+ GROWABLE_HEAP_I16()[((dirp + pos + 16) >>> 1) >>> 0] = 280;
+ GROWABLE_HEAP_I8()[((dirp + pos + 18) >>> 0) >>> 0] = type;
+ stringToUTF8(name, dirp + pos + 19, 256);
+ pos += struct_size;
+ idx += 1;
+ }
+ FS.llseek(stream, idx * struct_size, 0);
+ return pos;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_ioctl(fd, op, varargs) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(11, 1, fd, op, varargs);
+ varargs >>>= 0;
+ SYSCALLS.varargs = varargs;
+ try {
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ switch (op) {
+ case 21509: {
+ if (!stream.tty) return -59;
+ return 0;
+ }
+
+ case 21505: {
+ if (!stream.tty) return -59;
+ if (stream.tty.ops.ioctl_tcgets) {
+ var termios = stream.tty.ops.ioctl_tcgets(stream);
+ var argp = SYSCALLS.getp();
+ GROWABLE_HEAP_I32()[(argp >>> 2) >>> 0] = termios.c_iflag || 0;
+ GROWABLE_HEAP_I32()[((argp + 4) >>> 2) >>> 0] =
+ termios.c_oflag || 0;
+ GROWABLE_HEAP_I32()[((argp + 8) >>> 2) >>> 0] =
+ termios.c_cflag || 0;
+ GROWABLE_HEAP_I32()[((argp + 12) >>> 2) >>> 0] =
+ termios.c_lflag || 0;
+ for (var i = 0; i < 32; i++) {
+ GROWABLE_HEAP_I8()[((argp + i + 17) >>> 0) >>> 0] =
+ termios.c_cc[i] || 0;
+ }
+ return 0;
+ }
+ return 0;
+ }
+
+ case 21510:
+ case 21511:
+ case 21512: {
+ if (!stream.tty) return -59;
+ return 0;
+ }
+
+ case 21506:
+ case 21507:
+ case 21508: {
+ if (!stream.tty) return -59;
+ if (stream.tty.ops.ioctl_tcsets) {
+ var argp = SYSCALLS.getp();
+ var c_iflag = GROWABLE_HEAP_I32()[(argp >>> 2) >>> 0];
+ var c_oflag = GROWABLE_HEAP_I32()[((argp + 4) >>> 2) >>> 0];
+ var c_cflag = GROWABLE_HEAP_I32()[((argp + 8) >>> 2) >>> 0];
+ var c_lflag = GROWABLE_HEAP_I32()[((argp + 12) >>> 2) >>> 0];
+ var c_cc = [];
+ for (var i = 0; i < 32; i++) {
+ c_cc.push(GROWABLE_HEAP_I8()[((argp + i + 17) >>> 0) >>> 0]);
+ }
+ return stream.tty.ops.ioctl_tcsets(stream.tty, op, {
+ c_iflag: c_iflag,
+ c_oflag: c_oflag,
+ c_cflag: c_cflag,
+ c_lflag: c_lflag,
+ c_cc: c_cc,
+ });
+ }
+ return 0;
+ }
+
+ case 21519: {
+ if (!stream.tty) return -59;
+ var argp = SYSCALLS.getp();
+ GROWABLE_HEAP_I32()[(argp >>> 2) >>> 0] = 0;
+ return 0;
+ }
+
+ case 21520: {
+ if (!stream.tty) return -59;
+ return -28;
+ }
+
+ case 21531: {
+ var argp = SYSCALLS.getp();
+ return FS.ioctl(stream, op, argp);
+ }
+
+ case 21523: {
+ if (!stream.tty) return -59;
+ if (stream.tty.ops.ioctl_tiocgwinsz) {
+ var winsize = stream.tty.ops.ioctl_tiocgwinsz(stream.tty);
+ var argp = SYSCALLS.getp();
+ GROWABLE_HEAP_I16()[(argp >>> 1) >>> 0] = winsize[0];
+ GROWABLE_HEAP_I16()[((argp + 2) >>> 1) >>> 0] = winsize[1];
+ }
+ return 0;
+ }
+
+ case 21524: {
+ if (!stream.tty) return -59;
+ return 0;
+ }
+
+ case 21515: {
+ if (!stream.tty) return -59;
+ return 0;
+ }
+
+ default:
+ return -28;
+ }
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_lstat64(path, buf) {
+ if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(12, 1, path, buf);
+ path >>>= 0;
+ buf >>>= 0;
+ try {
+ path = SYSCALLS.getStr(path);
+ return SYSCALLS.doStat(FS.lstat, path, buf);
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_mkdirat(dirfd, path, mode) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(13, 1, dirfd, path, mode);
+ path >>>= 0;
+ try {
+ path = SYSCALLS.getStr(path);
+ path = SYSCALLS.calculateAt(dirfd, path);
+ path = PATH.normalize(path);
+ if (path[path.length - 1] === "/")
+ path = path.substr(0, path.length - 1);
+ FS.mkdir(path, mode, 0);
+ return 0;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_newfstatat(dirfd, path, buf, flags) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(14, 1, dirfd, path, buf, flags);
+ path >>>= 0;
+ buf >>>= 0;
+ try {
+ path = SYSCALLS.getStr(path);
+ var nofollow = flags & 256;
+ var allowEmpty = flags & 4096;
+ flags = flags & ~6400;
+ path = SYSCALLS.calculateAt(dirfd, path, allowEmpty);
+ return SYSCALLS.doStat(nofollow ? FS.lstat : FS.stat, path, buf);
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_openat(dirfd, path, flags, varargs) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(15, 1, dirfd, path, flags, varargs);
+ path >>>= 0;
+ varargs >>>= 0;
+ SYSCALLS.varargs = varargs;
+ try {
+ path = SYSCALLS.getStr(path);
+ path = SYSCALLS.calculateAt(dirfd, path);
+ var mode = varargs ? SYSCALLS.get() : 0;
+ return FS.open(path, flags, mode).fd;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_readlinkat(dirfd, path, buf, bufsize) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(16, 1, dirfd, path, buf, bufsize);
+ path >>>= 0;
+ buf >>>= 0;
+ bufsize >>>= 0;
+ try {
+ path = SYSCALLS.getStr(path);
+ path = SYSCALLS.calculateAt(dirfd, path);
+ if (bufsize <= 0) return -28;
+ var ret = FS.readlink(path);
+ var len = Math.min(bufsize, lengthBytesUTF8(ret));
+ var endChar = GROWABLE_HEAP_I8()[(buf + len) >>> 0];
+ stringToUTF8(ret, buf, bufsize + 1);
+ GROWABLE_HEAP_I8()[(buf + len) >>> 0] = endChar;
+ return len;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_renameat(olddirfd, oldpath, newdirfd, newpath) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(17, 1, olddirfd, oldpath, newdirfd, newpath);
+ oldpath >>>= 0;
+ newpath >>>= 0;
+ try {
+ oldpath = SYSCALLS.getStr(oldpath);
+ newpath = SYSCALLS.getStr(newpath);
+ oldpath = SYSCALLS.calculateAt(olddirfd, oldpath);
+ newpath = SYSCALLS.calculateAt(newdirfd, newpath);
+ FS.rename(oldpath, newpath);
+ return 0;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_stat64(path, buf) {
+ if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(18, 1, path, buf);
+ path >>>= 0;
+ buf >>>= 0;
+ try {
+ path = SYSCALLS.getStr(path);
+ return SYSCALLS.doStat(FS.stat, path, buf);
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ function ___syscall_unlinkat(dirfd, path, flags) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(19, 1, dirfd, path, flags);
+ path >>>= 0;
+ try {
+ path = SYSCALLS.getStr(path);
+ path = SYSCALLS.calculateAt(dirfd, path);
+ if (flags === 0) {
+ FS.unlink(path);
+ } else if (flags === 512) {
+ FS.rmdir(path);
+ } else {
+ abort("Invalid flags passed to unlinkat");
+ }
+ return 0;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
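+  // embind runtime: the __embind_register_* functions below are invoked by the
+  // compiled C++ at startup to describe its bound types, classes and functions
+  // to JavaScript.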
+ function __embind_register_bigint(
+ primitiveType,
+ name,
+ size,
+ minRange,
+ maxRange,
+ ) {
+ primitiveType >>>= 0;
+ name >>>= 0;
+ size >>>= 0;
+ }
+
+ var embind_init_charCodes = () => {
+ var codes = new Array(256);
+ for (var i = 0; i < 256; ++i) {
+ codes[i] = String.fromCharCode(i);
+ }
+ embind_charCodes = codes;
+ };
+
+ var embind_charCodes;
+
+ var readLatin1String = (ptr) => {
+ var ret = "";
+ var c = ptr;
+ while (GROWABLE_HEAP_U8()[c >>> 0]) {
+ ret += embind_charCodes[GROWABLE_HEAP_U8()[c++ >>> 0]];
+ }
+ return ret;
+ };
+
+ var awaitingDependencies = {};
+
+ var registeredTypes = {};
+
+ var typeDependencies = {};
+
+ var BindingError;
+
+ var throwBindingError = (message) => {
+ throw new BindingError(message);
+ };
+
+ var InternalError;
+
+ var throwInternalError = (message) => {
+ throw new InternalError(message);
+ };
+
+ var whenDependentTypesAreResolved = (
+ myTypes,
+ dependentTypes,
+ getTypeConverters,
+ ) => {
+ myTypes.forEach(function (type) {
+ typeDependencies[type] = dependentTypes;
+ });
+ function onComplete(typeConverters) {
+ var myTypeConverters = getTypeConverters(typeConverters);
+ if (myTypeConverters.length !== myTypes.length) {
+ throwInternalError("Mismatched type converter count");
+ }
+ for (var i = 0; i < myTypes.length; ++i) {
+ registerType(myTypes[i], myTypeConverters[i]);
+ }
+ }
+ var typeConverters = new Array(dependentTypes.length);
+ var unregisteredTypes = [];
+ var registered = 0;
+ dependentTypes.forEach((dt, i) => {
+ if (registeredTypes.hasOwnProperty(dt)) {
+ typeConverters[i] = registeredTypes[dt];
+ } else {
+ unregisteredTypes.push(dt);
+ if (!awaitingDependencies.hasOwnProperty(dt)) {
+ awaitingDependencies[dt] = [];
+ }
+ awaitingDependencies[dt].push(() => {
+ typeConverters[i] = registeredTypes[dt];
+ ++registered;
+ if (registered === unregisteredTypes.length) {
+ onComplete(typeConverters);
+ }
+ });
+ }
+ });
+ if (0 === unregisteredTypes.length) {
+ onComplete(typeConverters);
+ }
+ };
+
+ /** @param {Object=} options */ function sharedRegisterType(
+ rawType,
+ registeredInstance,
+ options = {},
+ ) {
+ var name = registeredInstance.name;
+ if (!rawType) {
+ throwBindingError(
+ `type "${name}" must have a positive integer typeid pointer`,
+ );
+ }
+ if (registeredTypes.hasOwnProperty(rawType)) {
+ if (options.ignoreDuplicateRegistrations) {
+ return;
+ } else {
+ throwBindingError(`Cannot register type '${name}' twice`);
+ }
+ }
+ registeredTypes[rawType] = registeredInstance;
+ delete typeDependencies[rawType];
+ if (awaitingDependencies.hasOwnProperty(rawType)) {
+ var callbacks = awaitingDependencies[rawType];
+ delete awaitingDependencies[rawType];
+ callbacks.forEach((cb) => cb());
+ }
+ }
+
+ /** @param {Object=} options */ function registerType(
+ rawType,
+ registeredInstance,
+ options = {},
+ ) {
+ if (!("argPackAdvance" in registeredInstance)) {
+ throw new TypeError(
+ "registerType registeredInstance requires argPackAdvance",
+ );
+ }
+ return sharedRegisterType(rawType, registeredInstance, options);
+ }
+
+ var GenericWireTypeSize = 8;
+
+ /** @suppress {globalThis} */ function __embind_register_bool(
+ rawType,
+ name,
+ trueValue,
+ falseValue,
+ ) {
+ rawType >>>= 0;
+ name >>>= 0;
+ name = readLatin1String(name);
+ registerType(rawType, {
+ name: name,
+ fromWireType: function (wt) {
+ return !!wt;
+ },
+ toWireType: function (destructors, o) {
+ return o ? trueValue : falseValue;
+ },
+ argPackAdvance: GenericWireTypeSize,
+ readValueFromPointer: function (pointer) {
+ return this["fromWireType"](GROWABLE_HEAP_U8()[pointer >>> 0]);
+ },
+ destructorFunction: null,
+ });
+ }
+
+ var shallowCopyInternalPointer = (o) => ({
+ count: o.count,
+ deleteScheduled: o.deleteScheduled,
+ preservePointerOnDelete: o.preservePointerOnDelete,
+ ptr: o.ptr,
+ ptrType: o.ptrType,
+ smartPtr: o.smartPtr,
+ smartPtrType: o.smartPtrType,
+ });
+
+ var throwInstanceAlreadyDeleted = (obj) => {
+ function getInstanceTypeName(handle) {
+ return handle.$$.ptrType.registeredClass.name;
+ }
+ throwBindingError(getInstanceTypeName(obj) + " instance already deleted");
+ };
+
+ var finalizationRegistry = false;
+
+ var detachFinalizer = (handle) => {};
+
+ var runDestructor = ($$) => {
+ if ($$.smartPtr) {
+ $$.smartPtrType.rawDestructor($$.smartPtr);
+ } else {
+ $$.ptrType.registeredClass.rawDestructor($$.ptr);
+ }
+ };
+
+ var releaseClassHandle = ($$) => {
+ $$.count.value -= 1;
+ var toDelete = 0 === $$.count.value;
+ if (toDelete) {
+ runDestructor($$);
+ }
+ };
+
+ var downcastPointer = (ptr, ptrClass, desiredClass) => {
+ if (ptrClass === desiredClass) {
+ return ptr;
+ }
+ if (undefined === desiredClass.baseClass) {
+ return null;
+ }
+ var rv = downcastPointer(ptr, ptrClass, desiredClass.baseClass);
+ if (rv === null) {
+ return null;
+ }
+ return desiredClass.downcast(rv);
+ };
+
+ var registeredPointers = {};
+
+ var getInheritedInstanceCount = () =>
+ Object.keys(registeredInstances).length;
+
+ var getLiveInheritedInstances = () => {
+ var rv = [];
+ for (var k in registeredInstances) {
+ if (registeredInstances.hasOwnProperty(k)) {
+ rv.push(registeredInstances[k]);
+ }
+ }
+ return rv;
+ };
+
+ var deletionQueue = [];
+
+ var flushPendingDeletes = () => {
+ while (deletionQueue.length) {
+ var obj = deletionQueue.pop();
+ obj.$$.deleteScheduled = false;
+ obj["delete"]();
+ }
+ };
+
+ var delayFunction;
+
+ var setDelayFunction = (fn) => {
+ delayFunction = fn;
+ if (deletionQueue.length && delayFunction) {
+ delayFunction(flushPendingDeletes);
+ }
+ };
+
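+  // Exposes the embind instance-tracking and deletion-queue helpers on the
+  // Module object.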
+ var init_embind = () => {
+ Module["getInheritedInstanceCount"] = getInheritedInstanceCount;
+ Module["getLiveInheritedInstances"] = getLiveInheritedInstances;
+ Module["flushPendingDeletes"] = flushPendingDeletes;
+ Module["setDelayFunction"] = setDelayFunction;
+ };
+
+ var registeredInstances = {};
+
+ var getBasestPointer = (class_, ptr) => {
+ if (ptr === undefined) {
+ throwBindingError("ptr should not be undefined");
+ }
+ while (class_.baseClass) {
+ ptr = class_.upcast(ptr);
+ class_ = class_.baseClass;
+ }
+ return ptr;
+ };
+
+ var getInheritedInstance = (class_, ptr) => {
+ ptr = getBasestPointer(class_, ptr);
+ return registeredInstances[ptr];
+ };
+
+ var makeClassHandle = (prototype, record) => {
+ if (!record.ptrType || !record.ptr) {
+ throwInternalError("makeClassHandle requires ptr and ptrType");
+ }
+ var hasSmartPtrType = !!record.smartPtrType;
+ var hasSmartPtr = !!record.smartPtr;
+ if (hasSmartPtrType !== hasSmartPtr) {
+ throwInternalError("Both smartPtrType and smartPtr must be specified");
+ }
+ record.count = {
+ value: 1,
+ };
+ return attachFinalizer(
+ Object.create(prototype, {
+ $$: {
+ value: record,
+ },
+ }),
+ );
+ };
+
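+  // Converts a raw or smart pointer coming out of wasm into a JS class handle,
+  // reusing an already-registered instance when possible and downcasting to the
+  // most derived registered type.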
+ /** @suppress {globalThis} */ function RegisteredPointer_fromWireType(ptr) {
+ var rawPointer = this.getPointee(ptr);
+ if (!rawPointer) {
+ this.destructor(ptr);
+ return null;
+ }
+ var registeredInstance = getInheritedInstance(
+ this.registeredClass,
+ rawPointer,
+ );
+ if (undefined !== registeredInstance) {
+ if (0 === registeredInstance.$$.count.value) {
+ registeredInstance.$$.ptr = rawPointer;
+ registeredInstance.$$.smartPtr = ptr;
+ return registeredInstance["clone"]();
+ } else {
+ var rv = registeredInstance["clone"]();
+ this.destructor(ptr);
+ return rv;
+ }
+ }
+ function makeDefaultHandle() {
+ if (this.isSmartPointer) {
+ return makeClassHandle(this.registeredClass.instancePrototype, {
+ ptrType: this.pointeeType,
+ ptr: rawPointer,
+ smartPtrType: this,
+ smartPtr: ptr,
+ });
+ } else {
+ return makeClassHandle(this.registeredClass.instancePrototype, {
+ ptrType: this,
+ ptr: ptr,
+ });
+ }
+ }
+ var actualType = this.registeredClass.getActualType(rawPointer);
+ var registeredPointerRecord = registeredPointers[actualType];
+ if (!registeredPointerRecord) {
+ return makeDefaultHandle.call(this);
+ }
+ var toType;
+ if (this.isConst) {
+ toType = registeredPointerRecord.constPointerType;
+ } else {
+ toType = registeredPointerRecord.pointerType;
+ }
+ var dp = downcastPointer(
+ rawPointer,
+ this.registeredClass,
+ toType.registeredClass,
+ );
+ if (dp === null) {
+ return makeDefaultHandle.call(this);
+ }
+ if (this.isSmartPointer) {
+ return makeClassHandle(toType.registeredClass.instancePrototype, {
+ ptrType: toType,
+ ptr: dp,
+ smartPtrType: this,
+ smartPtr: ptr,
+ });
+ } else {
+ return makeClassHandle(toType.registeredClass.instancePrototype, {
+ ptrType: toType,
+ ptr: dp,
+ });
+ }
+ }
+
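+  // Registers smart-pointer handles with a FinalizationRegistry (when the
+  // environment provides one) so that handles collected by the GC still release
+  // their underlying C++ object.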
+ var attachFinalizer = (handle) => {
+ if ("undefined" === typeof FinalizationRegistry) {
+ attachFinalizer = (handle) => handle;
+ return handle;
+ }
+ finalizationRegistry = new FinalizationRegistry((info) => {
+ releaseClassHandle(info.$$);
+ });
+ attachFinalizer = (handle) => {
+ var $$ = handle.$$;
+ var hasSmartPtr = !!$$.smartPtr;
+ if (hasSmartPtr) {
+ var info = {
+ $$: $$,
+ };
+ finalizationRegistry.register(handle, info, handle);
+ }
+ return handle;
+ };
+ detachFinalizer = (handle) => finalizationRegistry.unregister(handle);
+ return attachFinalizer(handle);
+ };
+
+ var init_ClassHandle = () => {
+ Object.assign(ClassHandle.prototype, {
+ isAliasOf(other) {
+ if (!(this instanceof ClassHandle)) {
+ return false;
+ }
+ if (!(other instanceof ClassHandle)) {
+ return false;
+ }
+ var leftClass = this.$$.ptrType.registeredClass;
+ var left = this.$$.ptr;
+ other.$$ = /** @type {Object} */ (other.$$);
+ var rightClass = other.$$.ptrType.registeredClass;
+ var right = other.$$.ptr;
+ while (leftClass.baseClass) {
+ left = leftClass.upcast(left);
+ leftClass = leftClass.baseClass;
+ }
+ while (rightClass.baseClass) {
+ right = rightClass.upcast(right);
+ rightClass = rightClass.baseClass;
+ }
+ return leftClass === rightClass && left === right;
+ },
+ clone() {
+ if (!this.$$.ptr) {
+ throwInstanceAlreadyDeleted(this);
+ }
+ if (this.$$.preservePointerOnDelete) {
+ this.$$.count.value += 1;
+ return this;
+ } else {
+ var clone = attachFinalizer(
+ Object.create(Object.getPrototypeOf(this), {
+ $$: {
+ value: shallowCopyInternalPointer(this.$$),
+ },
+ }),
+ );
+ clone.$$.count.value += 1;
+ clone.$$.deleteScheduled = false;
+ return clone;
+ }
+ },
+ delete() {
+ if (!this.$$.ptr) {
+ throwInstanceAlreadyDeleted(this);
+ }
+ if (this.$$.deleteScheduled && !this.$$.preservePointerOnDelete) {
+ throwBindingError("Object already scheduled for deletion");
+ }
+ detachFinalizer(this);
+ releaseClassHandle(this.$$);
+ if (!this.$$.preservePointerOnDelete) {
+ this.$$.smartPtr = undefined;
+ this.$$.ptr = undefined;
+ }
+ },
+ isDeleted() {
+ return !this.$$.ptr;
+ },
+ deleteLater() {
+ if (!this.$$.ptr) {
+ throwInstanceAlreadyDeleted(this);
+ }
+ if (this.$$.deleteScheduled && !this.$$.preservePointerOnDelete) {
+ throwBindingError("Object already scheduled for deletion");
+ }
+ deletionQueue.push(this);
+ if (deletionQueue.length === 1 && delayFunction) {
+ delayFunction(flushPendingDeletes);
+ }
+ this.$$.deleteScheduled = true;
+ return this;
+ },
+ });
+ };
+
+ /** @constructor */ function ClassHandle() {}
+
+ var char_0 = 48;
+
+ var char_9 = 57;
+
+ var makeLegalFunctionName = (name) => {
+ if (undefined === name) {
+ return "_unknown";
+ }
+ name = name.replace(/[^a-zA-Z0-9_]/g, "$");
+ var f = name.charCodeAt(0);
+ if (f >= char_0 && f <= char_9) {
+ return `_${name}`;
+ }
+ return name;
+ };
+
+ function createNamedFunction(name, body) {
+ name = makeLegalFunctionName(name);
+ return {
+ [name]: function () {
+ return body.apply(this, arguments);
+ },
+ }[name];
+ }
+
+ var ensureOverloadTable = (proto, methodName, humanName) => {
+ if (undefined === proto[methodName].overloadTable) {
+ var prevFunc = proto[methodName];
+ proto[methodName] = function () {
+ if (
+ !proto[methodName].overloadTable.hasOwnProperty(arguments.length)
+ ) {
+ throwBindingError(
+ `Function '${humanName}' called with an invalid number of arguments (${arguments.length}) - expects one of (${proto[methodName].overloadTable})!`,
+ );
+ }
+ return proto[methodName].overloadTable[arguments.length].apply(
+ this,
+ arguments,
+ );
+ };
+ proto[methodName].overloadTable = [];
+ proto[methodName].overloadTable[prevFunc.argCount] = prevFunc;
+ }
+ };
+
+ /** @param {number=} numArguments */ var exposePublicSymbol = (
+ name,
+ value,
+ numArguments,
+ ) => {
+ if (Module.hasOwnProperty(name)) {
+ if (
+ undefined === numArguments ||
+ (undefined !== Module[name].overloadTable &&
+ undefined !== Module[name].overloadTable[numArguments])
+ ) {
+ throwBindingError(`Cannot register public name '${name}' twice`);
+ }
+ ensureOverloadTable(Module, name, name);
+ if (Module.hasOwnProperty(numArguments)) {
+ throwBindingError(
+ `Cannot register multiple overloads of a function with the same number of arguments (${numArguments})!`,
+ );
+ }
+ Module[name].overloadTable[numArguments] = value;
+ } else {
+ Module[name] = value;
+ if (undefined !== numArguments) {
+ Module[name].numArguments = numArguments;
+ }
+ }
+ };
+
+ /** @constructor */ function RegisteredClass(
+ name,
+ constructor,
+ instancePrototype,
+ rawDestructor,
+ baseClass,
+ getActualType,
+ upcast,
+ downcast,
+ ) {
+ this.name = name;
+ this.constructor = constructor;
+ this.instancePrototype = instancePrototype;
+ this.rawDestructor = rawDestructor;
+ this.baseClass = baseClass;
+ this.getActualType = getActualType;
+ this.upcast = upcast;
+ this.downcast = downcast;
+ this.pureVirtualFunctions = [];
+ }
+
+ var upcastPointer = (ptr, ptrClass, desiredClass) => {
+ while (ptrClass !== desiredClass) {
+ if (!ptrClass.upcast) {
+ throwBindingError(
+ `Expected null or instance of ${desiredClass.name}, got an instance of ${ptrClass.name}`,
+ );
+ }
+ ptr = ptrClass.upcast(ptr);
+ ptrClass = ptrClass.baseClass;
+ }
+ return ptr;
+ };
+
+ /** @suppress {globalThis} */ function constNoSmartPtrRawPointerToWireType(
+ destructors,
+ handle,
+ ) {
+ if (handle === null) {
+ if (this.isReference) {
+ throwBindingError(`null is not a valid ${this.name}`);
+ }
+ return 0;
+ }
+ if (!handle.$$) {
+ throwBindingError(
+ `Cannot pass "${embindRepr(handle)}" as a ${this.name}`,
+ );
+ }
+ if (!handle.$$.ptr) {
+ throwBindingError(
+ `Cannot pass deleted object as a pointer of type ${this.name}`,
+ );
+ }
+ var handleClass = handle.$$.ptrType.registeredClass;
+ var ptr = upcastPointer(handle.$$.ptr, handleClass, this.registeredClass);
+ return ptr;
+ }
+
+ /** @suppress {globalThis} */ function genericPointerToWireType(
+ destructors,
+ handle,
+ ) {
+ var ptr;
+ if (handle === null) {
+ if (this.isReference) {
+ throwBindingError(`null is not a valid ${this.name}`);
+ }
+ if (this.isSmartPointer) {
+ ptr = this.rawConstructor();
+ if (destructors !== null) {
+ destructors.push(this.rawDestructor, ptr);
+ }
+ return ptr;
+ } else {
+ return 0;
+ }
+ }
+ if (!handle.$$) {
+ throwBindingError(
+ `Cannot pass "${embindRepr(handle)}" as a ${this.name}`,
+ );
+ }
+ if (!handle.$$.ptr) {
+ throwBindingError(
+ `Cannot pass deleted object as a pointer of type ${this.name}`,
+ );
+ }
+ if (!this.isConst && handle.$$.ptrType.isConst) {
+ throwBindingError(
+ `Cannot convert argument of type ${handle.$$.smartPtrType ? handle.$$.smartPtrType.name : handle.$$.ptrType.name} to parameter type ${this.name}`,
+ );
+ }
+ var handleClass = handle.$$.ptrType.registeredClass;
+ ptr = upcastPointer(handle.$$.ptr, handleClass, this.registeredClass);
+ if (this.isSmartPointer) {
+ if (undefined === handle.$$.smartPtr) {
+ throwBindingError("Passing raw pointer to smart pointer is illegal");
+ }
+ switch (this.sharingPolicy) {
+ case 0:
+ if (handle.$$.smartPtrType === this) {
+ ptr = handle.$$.smartPtr;
+ } else {
+ throwBindingError(
+ `Cannot convert argument of type ${handle.$$.smartPtrType ? handle.$$.smartPtrType.name : handle.$$.ptrType.name} to parameter type ${this.name}`,
+ );
+ }
+ break;
+
+ case 1:
+ ptr = handle.$$.smartPtr;
+ break;
+
+ case 2:
+ if (handle.$$.smartPtrType === this) {
+ ptr = handle.$$.smartPtr;
+ } else {
+ var clonedHandle = handle["clone"]();
+ ptr = this.rawShare(
+ ptr,
+ Emval.toHandle(() => clonedHandle["delete"]()),
+ );
+ if (destructors !== null) {
+ destructors.push(this.rawDestructor, ptr);
+ }
+ }
+ break;
+
+ default:
+            throwBindingError("Unsupported sharing policy");
+ }
+ }
+ return ptr;
+ }
+
+ /** @suppress {globalThis} */ function nonConstNoSmartPtrRawPointerToWireType(
+ destructors,
+ handle,
+ ) {
+ if (handle === null) {
+ if (this.isReference) {
+ throwBindingError(`null is not a valid ${this.name}`);
+ }
+ return 0;
+ }
+ if (!handle.$$) {
+ throwBindingError(
+ `Cannot pass "${embindRepr(handle)}" as a ${this.name}`,
+ );
+ }
+ if (!handle.$$.ptr) {
+ throwBindingError(
+ `Cannot pass deleted object as a pointer of type ${this.name}`,
+ );
+ }
+ if (handle.$$.ptrType.isConst) {
+ throwBindingError(
+ `Cannot convert argument of type ${handle.$$.ptrType.name} to parameter type ${this.name}`,
+ );
+ }
+ var handleClass = handle.$$.ptrType.registeredClass;
+ var ptr = upcastPointer(handle.$$.ptr, handleClass, this.registeredClass);
+ return ptr;
+ }
+
+ /** @suppress {globalThis} */ function readPointer(pointer) {
+ return this["fromWireType"](GROWABLE_HEAP_U32()[(pointer >>> 2) >>> 0]);
+ }
+
+ var init_RegisteredPointer = () => {
+ Object.assign(RegisteredPointer.prototype, {
+ getPointee(ptr) {
+ if (this.rawGetPointee) {
+ ptr = this.rawGetPointee(ptr);
+ }
+ return ptr;
+ },
+ destructor(ptr) {
+ if (this.rawDestructor) {
+ this.rawDestructor(ptr);
+ }
+ },
+ argPackAdvance: GenericWireTypeSize,
+ readValueFromPointer: readPointer,
+ deleteObject(handle) {
+ if (handle !== null) {
+ handle["delete"]();
+ }
+ },
+ fromWireType: RegisteredPointer_fromWireType,
+ });
+ };
+
+ /** @constructor
+ @param {*=} pointeeType,
+ @param {*=} sharingPolicy,
+ @param {*=} rawGetPointee,
+ @param {*=} rawConstructor,
+ @param {*=} rawShare,
+ @param {*=} rawDestructor,
+ */ function RegisteredPointer(
+ name,
+ registeredClass,
+ isReference,
+ isConst,
+ isSmartPointer,
+ pointeeType,
+ sharingPolicy,
+ rawGetPointee,
+ rawConstructor,
+ rawShare,
+ rawDestructor,
+ ) {
+ this.name = name;
+ this.registeredClass = registeredClass;
+ this.isReference = isReference;
+ this.isConst = isConst;
+ this.isSmartPointer = isSmartPointer;
+ this.pointeeType = pointeeType;
+ this.sharingPolicy = sharingPolicy;
+ this.rawGetPointee = rawGetPointee;
+ this.rawConstructor = rawConstructor;
+ this.rawShare = rawShare;
+ this.rawDestructor = rawDestructor;
+ if (!isSmartPointer && registeredClass.baseClass === undefined) {
+ if (isConst) {
+ this["toWireType"] = constNoSmartPtrRawPointerToWireType;
+ this.destructorFunction = null;
+ } else {
+ this["toWireType"] = nonConstNoSmartPtrRawPointerToWireType;
+ this.destructorFunction = null;
+ }
+ } else {
+ this["toWireType"] = genericPointerToWireType;
+ }
+ }
+
+ /** @param {number=} numArguments */ var replacePublicSymbol = (
+ name,
+ value,
+ numArguments,
+ ) => {
+ if (!Module.hasOwnProperty(name)) {
+        throwInternalError("Replacing nonexistent public symbol");
+ }
+ if (
+ undefined !== Module[name].overloadTable &&
+ undefined !== numArguments
+ ) {
+ Module[name].overloadTable[numArguments] = value;
+ } else {
+ Module[name] = value;
+ Module[name].argCount = numArguments;
+ }
+ };
+
+ var dynCallLegacy = (sig, ptr, args) => {
+ var f = Module["dynCall_" + sig];
+ return args && args.length
+ ? f.apply(null, [ptr].concat(args))
+ : f.call(null, ptr);
+ };
+
+ var wasmTableMirror = [];
+
+ var wasmTable;
+
+ var getWasmTableEntry = (funcPtr) => {
+ var func = wasmTableMirror[funcPtr];
+ if (!func) {
+ if (funcPtr >= wasmTableMirror.length)
+ wasmTableMirror.length = funcPtr + 1;
+ wasmTableMirror[funcPtr] = func = wasmTable.get(funcPtr);
+ }
+ return func;
+ };
+
+ /** @param {Object=} args */ var dynCall = (sig, ptr, args) => {
+ var rtn = dynCallLegacy(sig, ptr, args);
+ return rtn;
+ };
+
+ var getDynCaller = (sig, ptr) => {
+ var argCache = [];
+ return function () {
+ argCache.length = 0;
+ Object.assign(argCache, arguments);
+ return dynCall(sig, ptr, argCache);
+ };
+ };
+
+ var embind__requireFunction = (signature, rawFunction) => {
+ signature = readLatin1String(signature);
+ function makeDynCaller() {
+ return getDynCaller(signature, rawFunction);
+ }
+ var fp = makeDynCaller();
+ if (typeof fp != "function") {
+ throwBindingError(
+ `unknown function pointer with signature ${signature}: ${rawFunction}`,
+ );
+ }
+ return fp;
+ };
+
+ var extendError = (baseErrorType, errorName) => {
+ var errorClass = createNamedFunction(errorName, function (message) {
+ this.name = errorName;
+ this.message = message;
+ var stack = new Error(message).stack;
+ if (stack !== undefined) {
+ this.stack =
+ this.toString() + "\n" + stack.replace(/^Error(:[^\n]*)?\n/, "");
+ }
+ });
+ errorClass.prototype = Object.create(baseErrorType.prototype);
+ errorClass.prototype.constructor = errorClass;
+ errorClass.prototype.toString = function () {
+ if (this.message === undefined) {
+ return this.name;
+ } else {
+ return `${this.name}: ${this.message}`;
+ }
+ };
+ return errorClass;
+ };
+
+ var UnboundTypeError;
+
+ var getTypeName = (type) => {
+ var ptr = ___getTypeName(type);
+ var rv = readLatin1String(ptr);
+ _free(ptr);
+ return rv;
+ };
+
+ var throwUnboundTypeError = (message, types) => {
+ var unboundTypes = [];
+ var seen = {};
+ function visit(type) {
+ if (seen[type]) {
+ return;
+ }
+ if (registeredTypes[type]) {
+ return;
+ }
+ if (typeDependencies[type]) {
+ typeDependencies[type].forEach(visit);
+ return;
+ }
+ unboundTypes.push(type);
+ seen[type] = true;
+ }
+ types.forEach(visit);
+ throw new UnboundTypeError(
+ `${message}: ` + unboundTypes.map(getTypeName).join([", "]),
+ );
+ };
+
+ function __embind_register_class(
+ rawType,
+ rawPointerType,
+ rawConstPointerType,
+ baseClassRawType,
+ getActualTypeSignature,
+ getActualType,
+ upcastSignature,
+ upcast,
+ downcastSignature,
+ downcast,
+ name,
+ destructorSignature,
+ rawDestructor,
+ ) {
+ rawType >>>= 0;
+ rawPointerType >>>= 0;
+ rawConstPointerType >>>= 0;
+ baseClassRawType >>>= 0;
+ getActualTypeSignature >>>= 0;
+ getActualType >>>= 0;
+ upcastSignature >>>= 0;
+ upcast >>>= 0;
+ downcastSignature >>>= 0;
+ downcast >>>= 0;
+ name >>>= 0;
+ destructorSignature >>>= 0;
+ rawDestructor >>>= 0;
+ name = readLatin1String(name);
+ getActualType = embind__requireFunction(
+ getActualTypeSignature,
+ getActualType,
+ );
+ if (upcast) {
+ upcast = embind__requireFunction(upcastSignature, upcast);
+ }
+ if (downcast) {
+ downcast = embind__requireFunction(downcastSignature, downcast);
+ }
+ rawDestructor = embind__requireFunction(
+ destructorSignature,
+ rawDestructor,
+ );
+ var legalFunctionName = makeLegalFunctionName(name);
+ exposePublicSymbol(legalFunctionName, function () {
+ throwUnboundTypeError(`Cannot construct ${name} due to unbound types`, [
+ baseClassRawType,
+ ]);
+ });
+ whenDependentTypesAreResolved(
+ [rawType, rawPointerType, rawConstPointerType],
+ baseClassRawType ? [baseClassRawType] : [],
+ function (base) {
+ base = base[0];
+ var baseClass;
+ var basePrototype;
+ if (baseClassRawType) {
+ baseClass = base.registeredClass;
+ basePrototype = baseClass.instancePrototype;
+ } else {
+ basePrototype = ClassHandle.prototype;
+ }
+ var constructor = createNamedFunction(legalFunctionName, function () {
+ if (Object.getPrototypeOf(this) !== instancePrototype) {
+ throw new BindingError("Use 'new' to construct " + name);
+ }
+ if (undefined === registeredClass.constructor_body) {
+ throw new BindingError(name + " has no accessible constructor");
+ }
+ var body = registeredClass.constructor_body[arguments.length];
+ if (undefined === body) {
+ throw new BindingError(
+ `Tried to invoke ctor of ${name} with invalid number of parameters (${arguments.length}) - expected (${Object.keys(registeredClass.constructor_body).toString()}) parameters instead!`,
+ );
+ }
+ return body.apply(this, arguments);
+ });
+ var instancePrototype = Object.create(basePrototype, {
+ constructor: {
+ value: constructor,
+ },
+ });
+ constructor.prototype = instancePrototype;
+ var registeredClass = new RegisteredClass(
+ name,
+ constructor,
+ instancePrototype,
+ rawDestructor,
+ baseClass,
+ getActualType,
+ upcast,
+ downcast,
+ );
+ if (registeredClass.baseClass) {
+ if (registeredClass.baseClass.__derivedClasses === undefined) {
+ registeredClass.baseClass.__derivedClasses = [];
+ }
+ registeredClass.baseClass.__derivedClasses.push(registeredClass);
+ }
+ var referenceConverter = new RegisteredPointer(
+ name,
+ registeredClass,
+ true,
+ false,
+ false,
+ );
+ var pointerConverter = new RegisteredPointer(
+ name + "*",
+ registeredClass,
+ false,
+ false,
+ false,
+ );
+ var constPointerConverter = new RegisteredPointer(
+ name + " const*",
+ registeredClass,
+ false,
+ true,
+ false,
+ );
+ registeredPointers[rawType] = {
+ pointerType: pointerConverter,
+ constPointerType: constPointerConverter,
+ };
+ replacePublicSymbol(legalFunctionName, constructor);
+ return [referenceConverter, pointerConverter, constPointerConverter];
+ },
+ );
+ }
+
+ var runDestructors = (destructors) => {
+ while (destructors.length) {
+ var ptr = destructors.pop();
+ var del = destructors.pop();
+ del(ptr);
+ }
+ };
+
+ function newFunc(constructor, argumentList) {
+ if (!(constructor instanceof Function)) {
+ throw new TypeError(
+ `new_ called with constructor type ${typeof constructor} which is not a function`,
+ );
+ }
+ /*
+ * Previously, the following line was just:
+ * function dummy() {};
+ * Unfortunately, Chrome was preserving 'dummy' as the object's name, even
+ * though at creation, the 'dummy' has the correct constructor name. Thus,
+ * objects created with IMVU.new would show up in the debugger as 'dummy',
+ * which isn't very helpful. Using IMVU.createNamedFunction addresses the
+       * issue. Doubly unfortunately, there's no way to write a test for this
+ * behavior. -NRD 2013.02.22
+ */ var dummy = createNamedFunction(
+ constructor.name || "unknownFunctionName",
+ function () {},
+ );
+ dummy.prototype = constructor.prototype;
+ var obj = new dummy();
+ var r = constructor.apply(obj, argumentList);
+ return r instanceof Object ? r : obj;
+ }
+
+ var runAndAbortIfError = (func) => {
+ try {
+ return func();
+ } catch (e) {
+ abort(e);
+ }
+ };
+
+ var maybeExit = () => {
+ if (!keepRuntimeAlive()) {
+ try {
+ if (ENVIRONMENT_IS_PTHREAD) __emscripten_thread_exit(EXITSTATUS);
+ else _exit(EXITSTATUS);
+ } catch (e) {
+ handleException(e);
+ }
+ }
+ };
+
+ var callUserCallback = (func) => {
+ if (ABORT) {
+ return;
+ }
+ try {
+ func();
+ maybeExit();
+ } catch (e) {
+ handleException(e);
+ }
+ };
+
+ var sigToWasmTypes = (sig) => {
+ var typeNames = {
+ i: "i32",
+ j: "i64",
+ f: "f32",
+ d: "f64",
+ e: "externref",
+ p: "i32",
+ };
+ var type = {
+ parameters: [],
+ results: sig[0] == "v" ? [] : [typeNames[sig[0]]],
+ };
+ for (var i = 1; i < sig.length; ++i) {
+ type.parameters.push(typeNames[sig[i]]);
+ }
+ return type;
+ };
+
+ var runtimeKeepalivePush = () => {
+ runtimeKeepaliveCounter += 1;
+ };
+
+ var runtimeKeepalivePop = () => {
+ runtimeKeepaliveCounter -= 1;
+ };
+
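+  // Asyncify runtime: unwinds and rewinds the wasm call stack so compiled code
+  // can suspend on asynchronous JavaScript work (handleSleep/handleAsync) and
+  // resume where it left off.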
+ var Asyncify = {
+ instrumentWasmImports(imports) {
+ var importPattern = /^(invoke_.*|__asyncjs__.*)$/;
+ for (var x in imports) {
+ (function (x) {
+ var original = imports[x];
+ var sig = original.sig;
+ if (typeof original == "function") {
+ var isAsyncifyImport = original.isAsync || importPattern.test(x);
+ }
+ })(x);
+ }
+ },
+ instrumentWasmExports(exports) {
+ var ret = {};
+ for (var x in exports) {
+ (function (x) {
+ var original = exports[x];
+ if (typeof original == "function") {
+ ret[x] = function () {
+ Asyncify.exportCallStack.push(x);
+ try {
+ return original.apply(null, arguments);
+ } finally {
+ if (!ABORT) {
+ var y = Asyncify.exportCallStack.pop();
+ assert(y === x);
+ Asyncify.maybeStopUnwind();
+ }
+ }
+ };
+ } else {
+ ret[x] = original;
+ }
+ })(x);
+ }
+ return ret;
+ },
+ State: {
+ Normal: 0,
+ Unwinding: 1,
+ Rewinding: 2,
+ Disabled: 3,
+ },
+ state: 0,
+ StackSize: 4096,
+ currData: null,
+ handleSleepReturnValue: 0,
+ exportCallStack: [],
+ callStackNameToId: {},
+ callStackIdToName: {},
+ callStackId: 0,
+ asyncPromiseHandlers: null,
+ sleepCallbacks: [],
+ getCallStackId(funcName) {
+ var id = Asyncify.callStackNameToId[funcName];
+ if (id === undefined) {
+ id = Asyncify.callStackId++;
+ Asyncify.callStackNameToId[funcName] = id;
+ Asyncify.callStackIdToName[id] = funcName;
+ }
+ return id;
+ },
+ maybeStopUnwind() {
+ if (
+ Asyncify.currData &&
+ Asyncify.state === Asyncify.State.Unwinding &&
+ Asyncify.exportCallStack.length === 0
+ ) {
+ Asyncify.state = Asyncify.State.Normal;
+ runtimeKeepalivePush();
+ runAndAbortIfError(_asyncify_stop_unwind);
+ if (typeof Fibers != "undefined") {
+ Fibers.trampoline();
+ }
+ }
+ },
+ whenDone() {
+ return new Promise((resolve, reject) => {
+ Asyncify.asyncPromiseHandlers = {
+ resolve: resolve,
+ reject: reject,
+ };
+ });
+ },
+ allocateData() {
+ var ptr = _malloc(12 + Asyncify.StackSize);
+ Asyncify.setDataHeader(ptr, ptr + 12, Asyncify.StackSize);
+ Asyncify.setDataRewindFunc(ptr);
+ return ptr;
+ },
+ setDataHeader(ptr, stack, stackSize) {
+ GROWABLE_HEAP_U32()[(ptr >>> 2) >>> 0] = stack;
+ GROWABLE_HEAP_U32()[((ptr + 4) >>> 2) >>> 0] = stack + stackSize;
+ },
+ setDataRewindFunc(ptr) {
+ var bottomOfCallStack = Asyncify.exportCallStack[0];
+ var rewindId = Asyncify.getCallStackId(bottomOfCallStack);
+ GROWABLE_HEAP_I32()[((ptr + 8) >>> 2) >>> 0] = rewindId;
+ },
+ getDataRewindFunc(ptr) {
+ var id = GROWABLE_HEAP_I32()[((ptr + 8) >>> 2) >>> 0];
+ var name = Asyncify.callStackIdToName[id];
+ var func = wasmExports[name];
+ return func;
+ },
+ doRewind(ptr) {
+ var start = Asyncify.getDataRewindFunc(ptr);
+ runtimeKeepalivePop();
+ return start();
+ },
+ handleSleep(startAsync) {
+ if (ABORT) return;
+ if (Asyncify.state === Asyncify.State.Normal) {
+ var reachedCallback = false;
+ var reachedAfterCallback = false;
+ startAsync((handleSleepReturnValue = 0) => {
+ if (ABORT) return;
+ Asyncify.handleSleepReturnValue = handleSleepReturnValue;
+ reachedCallback = true;
+ if (!reachedAfterCallback) {
+ return;
+ }
+ Asyncify.state = Asyncify.State.Rewinding;
+ runAndAbortIfError(() => _asyncify_start_rewind(Asyncify.currData));
+ if (typeof Browser != "undefined" && Browser.mainLoop.func) {
+ Browser.mainLoop.resume();
+ }
+ var asyncWasmReturnValue,
+ isError = false;
+ try {
+ asyncWasmReturnValue = Asyncify.doRewind(Asyncify.currData);
+ } catch (err) {
+ asyncWasmReturnValue = err;
+ isError = true;
+ }
+ var handled = false;
+ if (!Asyncify.currData) {
+ var asyncPromiseHandlers = Asyncify.asyncPromiseHandlers;
+ if (asyncPromiseHandlers) {
+ Asyncify.asyncPromiseHandlers = null;
+ (isError
+ ? asyncPromiseHandlers.reject
+ : asyncPromiseHandlers.resolve)(asyncWasmReturnValue);
+ handled = true;
+ }
+ }
+ if (isError && !handled) {
+ throw asyncWasmReturnValue;
+ }
+ });
+ reachedAfterCallback = true;
+ if (!reachedCallback) {
+ Asyncify.state = Asyncify.State.Unwinding;
+ Asyncify.currData = Asyncify.allocateData();
+ if (typeof Browser != "undefined" && Browser.mainLoop.func) {
+ Browser.mainLoop.pause();
+ }
+ runAndAbortIfError(() => _asyncify_start_unwind(Asyncify.currData));
+ }
+ } else if (Asyncify.state === Asyncify.State.Rewinding) {
+ Asyncify.state = Asyncify.State.Normal;
+ runAndAbortIfError(_asyncify_stop_rewind);
+ _free(Asyncify.currData);
+ Asyncify.currData = null;
+ Asyncify.sleepCallbacks.forEach((func) => callUserCallback(func));
+ } else {
+ abort(`invalid state: ${Asyncify.state}`);
+ }
+ return Asyncify.handleSleepReturnValue;
+ },
+ handleAsync(startAsync) {
+ return Asyncify.handleSleep((wakeUp) => {
+ startAsync().then(wakeUp);
+ });
+ },
+ };
+
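+  // Builds the JavaScript invoker for a bound C++ function: converts arguments
+  // to wire types, calls the compiled invoker, runs any destructors, and
+  // converts the return value back (awaiting Asyncify if the call suspended).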
+ function craftInvokerFunction(
+ humanName,
+ argTypes,
+ classType,
+ cppInvokerFunc,
+ cppTargetFunc,
+ /** boolean= */ isAsync,
+ ) {
+ var argCount = argTypes.length;
+ if (argCount < 2) {
+ throwBindingError(
+ "argTypes array size mismatch! Must at least get return value and 'this' types!",
+ );
+ }
+ var isClassMethodFunc = argTypes[1] !== null && classType !== null;
+ var needsDestructorStack = false;
+ for (var i = 1; i < argTypes.length; ++i) {
+ if (
+ argTypes[i] !== null &&
+ argTypes[i].destructorFunction === undefined
+ ) {
+ needsDestructorStack = true;
+ break;
+ }
+ }
+ var returns = argTypes[0].name !== "void";
+ var argsList = "";
+ var argsListWired = "";
+ for (var i = 0; i < argCount - 2; ++i) {
+ argsList += (i !== 0 ? ", " : "") + "arg" + i;
+ argsListWired += (i !== 0 ? ", " : "") + "arg" + i + "Wired";
+ }
+ var invokerFnBody = `\n return function ${makeLegalFunctionName(humanName)}(${argsList}) {\n if (arguments.length !== ${argCount - 2}) {\n throwBindingError('function ${humanName} called with ' + arguments.length + ' arguments, expected ${argCount - 2}');\n }`;
+ if (needsDestructorStack) {
+ invokerFnBody += "var destructors = [];\n";
+ }
+ var dtorStack = needsDestructorStack ? "destructors" : "null";
+ var args1 = [
+ "throwBindingError",
+ "invoker",
+ "fn",
+ "runDestructors",
+ "retType",
+ "classParam",
+ ];
+ var args2 = [
+ throwBindingError,
+ cppInvokerFunc,
+ cppTargetFunc,
+ runDestructors,
+ argTypes[0],
+ argTypes[1],
+ ];
+ if (isClassMethodFunc) {
+ invokerFnBody +=
+ "var thisWired = classParam.toWireType(" + dtorStack + ", this);\n";
+ }
+ for (var i = 0; i < argCount - 2; ++i) {
+ invokerFnBody +=
+ "var arg" +
+ i +
+ "Wired = argType" +
+ i +
+ ".toWireType(" +
+ dtorStack +
+ ", arg" +
+ i +
+ "); // " +
+ argTypes[i + 2].name +
+ "\n";
+ args1.push("argType" + i);
+ args2.push(argTypes[i + 2]);
+ }
+ if (isClassMethodFunc) {
+ argsListWired =
+ "thisWired" + (argsListWired.length > 0 ? ", " : "") + argsListWired;
+ }
+ invokerFnBody +=
+ (returns || isAsync ? "var rv = " : "") +
+ "invoker(fn" +
+ (argsListWired.length > 0 ? ", " : "") +
+ argsListWired +
+ ");\n";
+ args1.push("Asyncify");
+ args2.push(Asyncify);
+ invokerFnBody += "function onDone(" + (returns ? "rv" : "") + ") {\n";
+ if (needsDestructorStack) {
+ invokerFnBody += "runDestructors(destructors);\n";
+ } else {
+ for (var i = isClassMethodFunc ? 1 : 2; i < argTypes.length; ++i) {
+ var paramName = i === 1 ? "thisWired" : "arg" + (i - 2) + "Wired";
+ if (argTypes[i].destructorFunction !== null) {
+ invokerFnBody +=
+ paramName +
+ "_dtor(" +
+ paramName +
+ "); // " +
+ argTypes[i].name +
+ "\n";
+ args1.push(paramName + "_dtor");
+ args2.push(argTypes[i].destructorFunction);
+ }
+ }
+ }
+ if (returns) {
+ invokerFnBody +=
+ "var ret = retType.fromWireType(rv);\n" + "return ret;\n";
+ } else {
+ }
+ invokerFnBody += "}\n";
+ invokerFnBody +=
+ "return Asyncify.currData ? Asyncify.whenDone().then(onDone) : onDone(" +
+ (returns ? "rv" : "") +
+ ");\n";
+ invokerFnBody += "}\n";
+ args1.push(invokerFnBody);
+ return newFunc(Function, args1).apply(null, args2);
+ }
+
+ var heap32VectorToArray = (count, firstElement) => {
+ var array = [];
+ for (var i = 0; i < count; i++) {
+ array.push(GROWABLE_HEAP_U32()[((firstElement + i * 4) >>> 2) >>> 0]);
+ }
+ return array;
+ };
+
+ var getFunctionName = (signature) => {
+ signature = signature.trim();
+ const argsIndex = signature.indexOf("(");
+ if (argsIndex !== -1) {
+ assert(
+ signature[signature.length - 1] == ")",
+ "Parentheses for argument names should match.",
+ );
+ return signature.substr(0, argsIndex);
+ } else {
+ return signature;
+ }
+ };
+
+ function __embind_register_class_class_function(
+ rawClassType,
+ methodName,
+ argCount,
+ rawArgTypesAddr,
+ invokerSignature,
+ rawInvoker,
+ fn,
+ isAsync,
+ ) {
+ rawClassType >>>= 0;
+ methodName >>>= 0;
+ rawArgTypesAddr >>>= 0;
+ invokerSignature >>>= 0;
+ rawInvoker >>>= 0;
+ fn >>>= 0;
+ var rawArgTypes = heap32VectorToArray(argCount, rawArgTypesAddr);
+ methodName = readLatin1String(methodName);
+ methodName = getFunctionName(methodName);
+ rawInvoker = embind__requireFunction(invokerSignature, rawInvoker);
+ whenDependentTypesAreResolved([], [rawClassType], function (classType) {
+ classType = classType[0];
+ var humanName = `${classType.name}.${methodName}`;
+ function unboundTypesHandler() {
+ throwUnboundTypeError(
+ `Cannot call ${humanName} due to unbound types`,
+ rawArgTypes,
+ );
+ }
+ if (methodName.startsWith("@@")) {
+ methodName = Symbol[methodName.substring(2)];
+ }
+ var proto = classType.registeredClass.constructor;
+ if (undefined === proto[methodName]) {
+ unboundTypesHandler.argCount = argCount - 1;
+ proto[methodName] = unboundTypesHandler;
+ } else {
+ ensureOverloadTable(proto, methodName, humanName);
+ proto[methodName].overloadTable[argCount - 1] = unboundTypesHandler;
+ }
+ whenDependentTypesAreResolved([], rawArgTypes, function (argTypes) {
+ var invokerArgsArray = [argTypes[0], /* return value */ null].concat(
+ /* no class 'this'*/ argTypes.slice(1),
+ );
+ /* actual params */ var func = craftInvokerFunction(
+ humanName,
+ invokerArgsArray,
+ null,
+ /* no class 'this'*/ rawInvoker,
+ fn,
+ isAsync,
+ );
+ if (undefined === proto[methodName].overloadTable) {
+ func.argCount = argCount - 1;
+ proto[methodName] = func;
+ } else {
+ proto[methodName].overloadTable[argCount - 1] = func;
+ }
+ if (classType.registeredClass.__derivedClasses) {
+ for (const derivedClass of classType.registeredClass
+ .__derivedClasses) {
+ if (!derivedClass.constructor.hasOwnProperty(methodName)) {
+ derivedClass.constructor[methodName] = func;
+ }
+ }
+ }
+ return [];
+ });
+ return [];
+ });
+ }
+
+ var validateThis = (this_, classType, humanName) => {
+ if (!(this_ instanceof Object)) {
+ throwBindingError(`${humanName} with invalid "this": ${this_}`);
+ }
+ if (!(this_ instanceof classType.registeredClass.constructor)) {
+ throwBindingError(
+ `${humanName} incompatible with "this" of type ${this_.constructor.name}`,
+ );
+ }
+ if (!this_.$$.ptr) {
+ throwBindingError(
+ `cannot call emscripten binding method ${humanName} on deleted object`,
+ );
+ }
+ return upcastPointer(
+ this_.$$.ptr,
+ this_.$$.ptrType.registeredClass,
+ classType.registeredClass,
+ );
+ };
+
+ function __embind_register_class_class_property(
+ rawClassType,
+ fieldName,
+ rawFieldType,
+ rawFieldPtr,
+ getterSignature,
+ getter,
+ setterSignature,
+ setter,
+ ) {
+ rawClassType >>>= 0;
+ fieldName >>>= 0;
+ rawFieldType >>>= 0;
+ rawFieldPtr >>>= 0;
+ getterSignature >>>= 0;
+ getter >>>= 0;
+ setterSignature >>>= 0;
+ setter >>>= 0;
+ fieldName = readLatin1String(fieldName);
+ getter = embind__requireFunction(getterSignature, getter);
+ whenDependentTypesAreResolved([], [rawClassType], function (classType) {
+ classType = classType[0];
+ var humanName = `${classType.name}.${fieldName}`;
+ var desc = {
+ get() {
+ throwUnboundTypeError(
+ `Cannot access ${humanName} due to unbound types`,
+ [rawFieldType],
+ );
+ },
+ enumerable: true,
+ configurable: true,
+ };
+ if (setter) {
+ desc.set = () => {
+ throwUnboundTypeError(
+ `Cannot access ${humanName} due to unbound types`,
+ [rawFieldType],
+ );
+ };
+ } else {
+ desc.set = (v) => {
+ throwBindingError(`${humanName} is a read-only property`);
+ };
+ }
+ Object.defineProperty(
+ classType.registeredClass.constructor,
+ fieldName,
+ desc,
+ );
+ whenDependentTypesAreResolved([], [rawFieldType], function (fieldType) {
+ fieldType = fieldType[0];
+ var desc = {
+ get() {
+ return fieldType["fromWireType"](getter(rawFieldPtr));
+ },
+ enumerable: true,
+ };
+ if (setter) {
+ setter = embind__requireFunction(setterSignature, setter);
+ desc.set = (v) => {
+ var destructors = [];
+ setter(rawFieldPtr, fieldType["toWireType"](destructors, v));
+ runDestructors(destructors);
+ };
+ }
+ Object.defineProperty(
+ classType.registeredClass.constructor,
+ fieldName,
+ desc,
+ );
+ return [];
+ });
+ return [];
+ });
+ }
+
+ function __embind_register_class_constructor(
+ rawClassType,
+ argCount,
+ rawArgTypesAddr,
+ invokerSignature,
+ invoker,
+ rawConstructor,
+ ) {
+ rawClassType >>>= 0;
+ rawArgTypesAddr >>>= 0;
+ invokerSignature >>>= 0;
+ invoker >>>= 0;
+ rawConstructor >>>= 0;
+ var rawArgTypes = heap32VectorToArray(argCount, rawArgTypesAddr);
+ invoker = embind__requireFunction(invokerSignature, invoker);
+ var args = [rawConstructor];
+ var destructors = [];
+ whenDependentTypesAreResolved([], [rawClassType], function (classType) {
+ classType = classType[0];
+ var humanName = `constructor ${classType.name}`;
+ if (undefined === classType.registeredClass.constructor_body) {
+ classType.registeredClass.constructor_body = [];
+ }
+ if (
+ undefined !== classType.registeredClass.constructor_body[argCount - 1]
+ ) {
+ throw new BindingError(
+ `Cannot register multiple constructors with identical number of parameters (${argCount - 1}) for class '${classType.name}'! Overload resolution is currently only performed using the parameter count, not actual type info!`,
+ );
+ }
+ classType.registeredClass.constructor_body[argCount - 1] = () => {
+ throwUnboundTypeError(
+ `Cannot construct ${classType.name} due to unbound types`,
+ rawArgTypes,
+ );
+ };
+ whenDependentTypesAreResolved([], rawArgTypes, (argTypes) => {
+ argTypes.splice(1, 0, null);
+ classType.registeredClass.constructor_body[argCount - 1] =
+ craftInvokerFunction(
+ humanName,
+ argTypes,
+ null,
+ invoker,
+ rawConstructor,
+ );
+ return [];
+ });
+ return [];
+ });
+ }
+
+ function __embind_register_class_function(
+ rawClassType,
+ methodName,
+ argCount,
+ rawArgTypesAddr,
+ invokerSignature,
+ rawInvoker,
+ context,
+ isPureVirtual,
+ isAsync,
+ ) {
+ rawClassType >>>= 0;
+ methodName >>>= 0;
+ rawArgTypesAddr >>>= 0;
+ invokerSignature >>>= 0;
+ rawInvoker >>>= 0;
+ context >>>= 0;
+ var rawArgTypes = heap32VectorToArray(argCount, rawArgTypesAddr);
+ methodName = readLatin1String(methodName);
+ methodName = getFunctionName(methodName);
+ rawInvoker = embind__requireFunction(invokerSignature, rawInvoker);
+ whenDependentTypesAreResolved([], [rawClassType], function (classType) {
+ classType = classType[0];
+ var humanName = `${classType.name}.${methodName}`;
+ if (methodName.startsWith("@@")) {
+ methodName = Symbol[methodName.substring(2)];
+ }
+ if (isPureVirtual) {
+ classType.registeredClass.pureVirtualFunctions.push(methodName);
+ }
+ function unboundTypesHandler() {
+ throwUnboundTypeError(
+ `Cannot call ${humanName} due to unbound types`,
+ rawArgTypes,
+ );
+ }
+ var proto = classType.registeredClass.instancePrototype;
+ var method = proto[methodName];
+ if (
+ undefined === method ||
+ (undefined === method.overloadTable &&
+ method.className !== classType.name &&
+ method.argCount === argCount - 2)
+ ) {
+ unboundTypesHandler.argCount = argCount - 2;
+ unboundTypesHandler.className = classType.name;
+ proto[methodName] = unboundTypesHandler;
+ } else {
+ ensureOverloadTable(proto, methodName, humanName);
+ proto[methodName].overloadTable[argCount - 2] = unboundTypesHandler;
+ }
+ whenDependentTypesAreResolved([], rawArgTypes, function (argTypes) {
+ var memberFunction = craftInvokerFunction(
+ humanName,
+ argTypes,
+ classType,
+ rawInvoker,
+ context,
+ isAsync,
+ );
+ if (undefined === proto[methodName].overloadTable) {
+ memberFunction.argCount = argCount - 2;
+ proto[methodName] = memberFunction;
+ } else {
+ proto[methodName].overloadTable[argCount - 2] = memberFunction;
+ }
+ return [];
+ });
+ return [];
+ });
+ }
+
+ function __embind_register_class_property(
+ classType,
+ fieldName,
+ getterReturnType,
+ getterSignature,
+ getter,
+ getterContext,
+ setterArgumentType,
+ setterSignature,
+ setter,
+ setterContext,
+ ) {
+ classType >>>= 0;
+ fieldName >>>= 0;
+ getterReturnType >>>= 0;
+ getterSignature >>>= 0;
+ getter >>>= 0;
+ getterContext >>>= 0;
+ setterArgumentType >>>= 0;
+ setterSignature >>>= 0;
+ setter >>>= 0;
+ setterContext >>>= 0;
+ fieldName = readLatin1String(fieldName);
+ getter = embind__requireFunction(getterSignature, getter);
+ whenDependentTypesAreResolved([], [classType], function (classType) {
+ classType = classType[0];
+ var humanName = `${classType.name}.${fieldName}`;
+ var desc = {
+ get() {
+ throwUnboundTypeError(
+ `Cannot access ${humanName} due to unbound types`,
+ [getterReturnType, setterArgumentType],
+ );
+ },
+ enumerable: true,
+ configurable: true,
+ };
+ if (setter) {
+ desc.set = () =>
+ throwUnboundTypeError(
+ `Cannot access ${humanName} due to unbound types`,
+ [getterReturnType, setterArgumentType],
+ );
+ } else {
+ desc.set = (v) =>
+ throwBindingError(humanName + " is a read-only property");
+ }
+ Object.defineProperty(
+ classType.registeredClass.instancePrototype,
+ fieldName,
+ desc,
+ );
+ whenDependentTypesAreResolved(
+ [],
+ setter ? [getterReturnType, setterArgumentType] : [getterReturnType],
+ function (types) {
+ var getterReturnType = types[0];
+ var desc = {
+ get() {
+ var ptr = validateThis(this, classType, humanName + " getter");
+ return getterReturnType["fromWireType"](
+ getter(getterContext, ptr),
+ );
+ },
+ enumerable: true,
+ };
+ if (setter) {
+ setter = embind__requireFunction(setterSignature, setter);
+ var setterArgumentType = types[1];
+ desc.set = function (v) {
+ var ptr = validateThis(this, classType, humanName + " setter");
+ var destructors = [];
+ setter(
+ setterContext,
+ ptr,
+ setterArgumentType["toWireType"](destructors, v),
+ );
+ runDestructors(destructors);
+ };
+ }
+ Object.defineProperty(
+ classType.registeredClass.instancePrototype,
+ fieldName,
+ desc,
+ );
+ return [];
+ },
+ );
+ return [];
+ });
+ }
+
+ function handleAllocatorInit() {
+ Object.assign(
+ HandleAllocator.prototype,
+ /** @lends {HandleAllocator.prototype} */ {
+ get(id) {
+ return this.allocated[id];
+ },
+ has(id) {
+ return this.allocated[id] !== undefined;
+ },
+ allocate(handle) {
+ var id = this.freelist.pop() || this.allocated.length;
+ this.allocated[id] = handle;
+ return id;
+ },
+ free(id) {
+ this.allocated[id] = undefined;
+ this.freelist.push(id);
+ },
+ },
+ );
+ }
+
+ /** @constructor */ function HandleAllocator() {
+ this.allocated = [undefined];
+ this.freelist = [];
+ }
+
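+  // emval: maps arbitrary JavaScript values to integer handles (with reserved
+  // slots for undefined/null/true/false) so they can cross the wasm boundary.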
+ var emval_handles = new HandleAllocator();
+
+ function __emval_decref(handle) {
+ handle >>>= 0;
+ if (
+ handle >= emval_handles.reserved &&
+ 0 === --emval_handles.get(handle).refcount
+ ) {
+ emval_handles.free(handle);
+ }
+ }
+
+ var count_emval_handles = () => {
+ var count = 0;
+ for (
+ var i = emval_handles.reserved;
+ i < emval_handles.allocated.length;
+ ++i
+ ) {
+ if (emval_handles.allocated[i] !== undefined) {
+ ++count;
+ }
+ }
+ return count;
+ };
+
+ var init_emval = () => {
+ emval_handles.allocated.push(
+ {
+ value: undefined,
+ },
+ {
+ value: null,
+ },
+ {
+ value: true,
+ },
+ {
+ value: false,
+ },
+ );
+ emval_handles.reserved = emval_handles.allocated.length;
+ Module["count_emval_handles"] = count_emval_handles;
+ };
+
+ var Emval = {
+ toValue: (handle) => {
+ if (!handle) {
+ throwBindingError("Cannot use deleted val. handle = " + handle);
+ }
+ return emval_handles.get(handle).value;
+ },
+ toHandle: (value) => {
+ switch (value) {
+ case undefined:
+ return 1;
+
+ case null:
+ return 2;
+
+ case true:
+ return 3;
+
+ case false:
+ return 4;
+
+ default: {
+ return emval_handles.allocate({
+ refcount: 1,
+ value: value,
+ });
+ }
+ }
+ },
+ };
+
+ /** @suppress {globalThis} */ function simpleReadValueFromPointer(pointer) {
+ return this["fromWireType"](GROWABLE_HEAP_I32()[(pointer >>> 2) >>> 0]);
+ }
+
+ var __embind_register_emval = function (rawType, name) {
+ rawType >>>= 0;
+ name >>>= 0;
+ name = readLatin1String(name);
+ registerType(rawType, {
+ name: name,
+ fromWireType: (handle) => {
+ var rv = Emval.toValue(handle);
+ __emval_decref(handle);
+ return rv;
+ },
+ toWireType: (destructors, value) => Emval.toHandle(value),
+ argPackAdvance: GenericWireTypeSize,
+ readValueFromPointer: simpleReadValueFromPointer,
+ destructorFunction: null,
+ });
+ };
+
+ var enumReadValueFromPointer = (name, width, signed) => {
+ switch (width) {
+ case 1:
+ return signed
+ ? function (pointer) {
+ return this["fromWireType"](
+ GROWABLE_HEAP_I8()[(pointer >>> 0) >>> 0],
+ );
+ }
+ : function (pointer) {
+ return this["fromWireType"](
+ GROWABLE_HEAP_U8()[(pointer >>> 0) >>> 0],
+ );
+ };
+
+ case 2:
+ return signed
+ ? function (pointer) {
+ return this["fromWireType"](
+ GROWABLE_HEAP_I16()[(pointer >>> 1) >>> 0],
+ );
+ }
+ : function (pointer) {
+ return this["fromWireType"](
+ GROWABLE_HEAP_U16()[(pointer >>> 1) >>> 0],
+ );
+ };
+
+ case 4:
+ return signed
+ ? function (pointer) {
+ return this["fromWireType"](
+ GROWABLE_HEAP_I32()[(pointer >>> 2) >>> 0],
+ );
+ }
+ : function (pointer) {
+ return this["fromWireType"](
+ GROWABLE_HEAP_U32()[(pointer >>> 2) >>> 0],
+ );
+ };
+
+ default:
+ throw new TypeError(`invalid integer width (${width}): ${name}`);
+ }
+ };
+
+ /** @suppress {globalThis} */ function __embind_register_enum(
+ rawType,
+ name,
+ size,
+ isSigned,
+ ) {
+ rawType >>>= 0;
+ name >>>= 0;
+ size >>>= 0;
+ name = readLatin1String(name);
+ function ctor() {}
+ ctor.values = {};
+ registerType(rawType, {
+ name: name,
+ constructor: ctor,
+ fromWireType: function (c) {
+ return this.constructor.values[c];
+ },
+ toWireType: (destructors, c) => c.value,
+ argPackAdvance: GenericWireTypeSize,
+ readValueFromPointer: enumReadValueFromPointer(name, size, isSigned),
+ destructorFunction: null,
+ });
+ exposePublicSymbol(name, ctor);
+ }
+
+ var requireRegisteredType = (rawType, humanName) => {
+ var impl = registeredTypes[rawType];
+ if (undefined === impl) {
+ throwBindingError(
+ humanName + " has unknown type " + getTypeName(rawType),
+ );
+ }
+ return impl;
+ };
+
+ function __embind_register_enum_value(rawEnumType, name, enumValue) {
+ rawEnumType >>>= 0;
+ name >>>= 0;
+ enumValue >>>= 0;
+ var enumType = requireRegisteredType(rawEnumType, "enum");
+ name = readLatin1String(name);
+ var Enum = enumType.constructor;
+ var Value = Object.create(enumType.constructor.prototype, {
+ value: {
+ value: enumValue,
+ },
+ constructor: {
+ value: createNamedFunction(
+ `${enumType.name}_${name}`,
+ function () {},
+ ),
+ },
+ });
+ Enum.values[enumValue] = Value;
+ Enum[name] = Value;
+ }
+
+ var embindRepr = (v) => {
+ if (v === null) {
+ return "null";
+ }
+ var t = typeof v;
+ if (t === "object" || t === "array" || t === "function") {
+ return v.toString();
+ } else {
+ return "" + v;
+ }
+ };
+
+ var floatReadValueFromPointer = (name, width) => {
+ switch (width) {
+ case 4:
+ return function (pointer) {
+ return this["fromWireType"](
+ GROWABLE_HEAP_F32()[(pointer >>> 2) >>> 0],
+ );
+ };
+
+ case 8:
+ return function (pointer) {
+ return this["fromWireType"](
+ GROWABLE_HEAP_F64()[(pointer >>> 3) >>> 0],
+ );
+ };
+
+ default:
+ throw new TypeError(`invalid float width (${width}): ${name}`);
+ }
+ };
+
+ var __embind_register_float = function (rawType, name, size) {
+ rawType >>>= 0;
+ name >>>= 0;
+ size >>>= 0;
+ name = readLatin1String(name);
+ registerType(rawType, {
+ name: name,
+ fromWireType: (value) => value,
+ toWireType: (destructors, value) => value,
+ argPackAdvance: GenericWireTypeSize,
+ readValueFromPointer: floatReadValueFromPointer(name, size),
+ destructorFunction: null,
+ });
+ };
+
+ function __embind_register_function(
+ name,
+ argCount,
+ rawArgTypesAddr,
+ signature,
+ rawInvoker,
+ fn,
+ isAsync,
+ ) {
+ name >>>= 0;
+ rawArgTypesAddr >>>= 0;
+ signature >>>= 0;
+ rawInvoker >>>= 0;
+ fn >>>= 0;
+ var argTypes = heap32VectorToArray(argCount, rawArgTypesAddr);
+ name = readLatin1String(name);
+ name = getFunctionName(name);
+ rawInvoker = embind__requireFunction(signature, rawInvoker);
+ exposePublicSymbol(
+ name,
+ function () {
+ throwUnboundTypeError(
+ `Cannot call ${name} due to unbound types`,
+ argTypes,
+ );
+ },
+ argCount - 1,
+ );
+ whenDependentTypesAreResolved([], argTypes, function (argTypes) {
+ var invokerArgsArray = [argTypes[0], /* return value */ null].concat(
+ /* no class 'this'*/ argTypes.slice(1),
+ );
+ /* actual params */ replacePublicSymbol(
+ name,
+ craftInvokerFunction(
+ name,
+ invokerArgsArray,
+ null,
+ /* no class 'this'*/ rawInvoker,
+ fn,
+ isAsync,
+ ),
+ argCount - 1,
+ );
+ return [];
+ });
+ }
+
+ var integerReadValueFromPointer = (name, width, signed) => {
+ switch (width) {
+ case 1:
+ return signed
+ ? (pointer) => GROWABLE_HEAP_I8()[(pointer >>> 0) >>> 0]
+ : (pointer) => GROWABLE_HEAP_U8()[(pointer >>> 0) >>> 0];
+
+ case 2:
+ return signed
+ ? (pointer) => GROWABLE_HEAP_I16()[(pointer >>> 1) >>> 0]
+ : (pointer) => GROWABLE_HEAP_U16()[(pointer >>> 1) >>> 0];
+
+ case 4:
+ return signed
+ ? (pointer) => GROWABLE_HEAP_I32()[(pointer >>> 2) >>> 0]
+ : (pointer) => GROWABLE_HEAP_U32()[(pointer >>> 2) >>> 0];
+
+ default:
+ throw new TypeError(`invalid integer width (${width}): ${name}`);
+ }
+ };
+
+ /** @suppress {globalThis} */ function __embind_register_integer(
+ primitiveType,
+ name,
+ size,
+ minRange,
+ maxRange,
+ ) {
+ primitiveType >>>= 0;
+ name >>>= 0;
+ size >>>= 0;
+ name = readLatin1String(name);
+ if (maxRange === -1) {
+ maxRange = 4294967295;
+ }
+ var fromWireType = (value) => value;
+ if (minRange === 0) {
+ var bitshift = 32 - 8 * size;
+ fromWireType = (value) => (value << bitshift) >>> bitshift;
+ }
+ var isUnsignedType = name.includes("unsigned");
+ var checkAssertions = (value, toTypeName) => {};
+ var toWireType;
+ if (isUnsignedType) {
+ toWireType = function (destructors, value) {
+ checkAssertions(value, this.name);
+ return value >>> 0;
+ };
+ } else {
+ toWireType = function (destructors, value) {
+ checkAssertions(value, this.name);
+ return value;
+ };
+ }
+ registerType(primitiveType, {
+ name: name,
+ fromWireType: fromWireType,
+ toWireType: toWireType,
+ argPackAdvance: GenericWireTypeSize,
+ readValueFromPointer: integerReadValueFromPointer(
+ name,
+ size,
+ minRange !== 0,
+ ),
+ destructorFunction: null,
+ });
+ }
+
+ function __embind_register_memory_view(rawType, dataTypeIndex, name) {
+ rawType >>>= 0;
+ name >>>= 0;
+ var typeMapping = [
+ Int8Array,
+ Uint8Array,
+ Int16Array,
+ Uint16Array,
+ Int32Array,
+ Uint32Array,
+ Float32Array,
+ Float64Array,
+ ];
+ var TA = typeMapping[dataTypeIndex];
+ function decodeMemoryView(handle) {
+ var size = GROWABLE_HEAP_U32()[(handle >>> 2) >>> 0];
+ var data = GROWABLE_HEAP_U32()[((handle + 4) >>> 2) >>> 0];
+ return new TA(GROWABLE_HEAP_I8().buffer, data, size);
+ }
+ name = readLatin1String(name);
+ registerType(
+ rawType,
+ {
+ name: name,
+ fromWireType: decodeMemoryView,
+ argPackAdvance: GenericWireTypeSize,
+ readValueFromPointer: decodeMemoryView,
+ },
+ {
+ ignoreDuplicateRegistrations: true,
+ },
+ );
+ }
+
+ function __embind_register_smart_ptr(
+ rawType,
+ rawPointeeType,
+ name,
+ sharingPolicy,
+ getPointeeSignature,
+ rawGetPointee,
+ constructorSignature,
+ rawConstructor,
+ shareSignature,
+ rawShare,
+ destructorSignature,
+ rawDestructor,
+ ) {
+ rawType >>>= 0;
+ rawPointeeType >>>= 0;
+ name >>>= 0;
+ getPointeeSignature >>>= 0;
+ rawGetPointee >>>= 0;
+ constructorSignature >>>= 0;
+ rawConstructor >>>= 0;
+ shareSignature >>>= 0;
+ rawShare >>>= 0;
+ destructorSignature >>>= 0;
+ rawDestructor >>>= 0;
+ name = readLatin1String(name);
+ rawGetPointee = embind__requireFunction(
+ getPointeeSignature,
+ rawGetPointee,
+ );
+ rawConstructor = embind__requireFunction(
+ constructorSignature,
+ rawConstructor,
+ );
+ rawShare = embind__requireFunction(shareSignature, rawShare);
+ rawDestructor = embind__requireFunction(
+ destructorSignature,
+ rawDestructor,
+ );
+ whenDependentTypesAreResolved(
+ [rawType],
+ [rawPointeeType],
+ function (pointeeType) {
+ pointeeType = pointeeType[0];
+ var registeredPointer = new RegisteredPointer(
+ name,
+ pointeeType.registeredClass,
+ false,
+ false,
+ true,
+ pointeeType,
+ sharingPolicy,
+ rawGetPointee,
+ rawConstructor,
+ rawShare,
+ rawDestructor,
+ );
+ return [registeredPointer];
+ },
+ );
+ }
+
+ function __embind_register_std_string(rawType, name) {
+ rawType >>>= 0;
+ name >>>= 0;
+ name = readLatin1String(name);
+ var stdStringIsUTF8 = name === "std::string";
+ registerType(rawType, {
+ name: name,
+ fromWireType(value) {
+ var length = GROWABLE_HEAP_U32()[(value >>> 2) >>> 0];
+ var payload = value + 4;
+ var str;
+ if (stdStringIsUTF8) {
+ var decodeStartPtr = payload;
+ for (var i = 0; i <= length; ++i) {
+ var currentBytePtr = payload + i;
+ if (
+ i == length ||
+ GROWABLE_HEAP_U8()[currentBytePtr >>> 0] == 0
+ ) {
+ var maxRead = currentBytePtr - decodeStartPtr;
+ var stringSegment = UTF8ToString(decodeStartPtr, maxRead);
+ if (str === undefined) {
+ str = stringSegment;
+ } else {
+ str += String.fromCharCode(0);
+ str += stringSegment;
+ }
+ decodeStartPtr = currentBytePtr + 1;
+ }
+ }
+ } else {
+ var a = new Array(length);
+ for (var i = 0; i < length; ++i) {
+ a[i] = String.fromCharCode(
+ GROWABLE_HEAP_U8()[(payload + i) >>> 0],
+ );
+ }
+ str = a.join("");
+ }
+ _free(value);
+ return str;
+ },
+ toWireType(destructors, value) {
+ if (value instanceof ArrayBuffer) {
+ value = new Uint8Array(value);
+ }
+ var length;
+ var valueIsOfTypeString = typeof value == "string";
+ if (
+ !(
+ valueIsOfTypeString ||
+ value instanceof Uint8Array ||
+ value instanceof Uint8ClampedArray ||
+ value instanceof Int8Array
+ )
+ ) {
+ throwBindingError("Cannot pass non-string to std::string");
+ }
+ if (stdStringIsUTF8 && valueIsOfTypeString) {
+ length = lengthBytesUTF8(value);
+ } else {
+ length = value.length;
+ }
+ var base = _malloc(4 + length + 1);
+ var ptr = base + 4;
+ GROWABLE_HEAP_U32()[(base >>> 2) >>> 0] = length;
+ if (stdStringIsUTF8 && valueIsOfTypeString) {
+ stringToUTF8(value, ptr, length + 1);
+ } else {
+ if (valueIsOfTypeString) {
+ for (var i = 0; i < length; ++i) {
+ var charCode = value.charCodeAt(i);
+ if (charCode > 255) {
+ _free(ptr);
+ throwBindingError(
+ "String has UTF-16 code units that do not fit in 8 bits",
+ );
+ }
+ GROWABLE_HEAP_U8()[(ptr + i) >>> 0] = charCode;
+ }
+ } else {
+ for (var i = 0; i < length; ++i) {
+ GROWABLE_HEAP_U8()[(ptr + i) >>> 0] = value[i];
+ }
+ }
+ }
+ if (destructors !== null) {
+ destructors.push(_free, base);
+ }
+ return base;
+ },
+ argPackAdvance: GenericWireTypeSize,
+ readValueFromPointer: readPointer,
+ destructorFunction(ptr) {
+ _free(ptr);
+ },
+ });
+ }
+
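+ // UTF-16/UTF-32 decode and encode helpers used by the wide-string bindings registered below.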
+ var UTF16Decoder =
+ typeof TextDecoder != "undefined"
+ ? new TextDecoder("utf-16le")
+ : undefined;
+
+ var UTF16ToString = (ptr, maxBytesToRead) => {
+ var endPtr = ptr;
+ var idx = endPtr >> 1;
+ var maxIdx = idx + maxBytesToRead / 2;
+ while (!(idx >= maxIdx) && GROWABLE_HEAP_U16()[idx >>> 0]) ++idx;
+ endPtr = idx << 1;
+ if (endPtr - ptr > 32 && UTF16Decoder)
+ return UTF16Decoder.decode(GROWABLE_HEAP_U8().slice(ptr, endPtr));
+ var str = "";
+ for (var i = 0; !(i >= maxBytesToRead / 2); ++i) {
+ var codeUnit = GROWABLE_HEAP_I16()[((ptr + i * 2) >>> 1) >>> 0];
+ if (codeUnit == 0) break;
+ str += String.fromCharCode(codeUnit);
+ }
+ return str;
+ };
+
+ var stringToUTF16 = (str, outPtr, maxBytesToWrite) => {
+ if (maxBytesToWrite === undefined) {
+ maxBytesToWrite = 2147483647;
+ }
+ if (maxBytesToWrite < 2) return 0;
+ maxBytesToWrite -= 2;
+ var startPtr = outPtr;
+ var numCharsToWrite =
+ maxBytesToWrite < str.length * 2 ? maxBytesToWrite / 2 : str.length;
+ for (var i = 0; i < numCharsToWrite; ++i) {
+ var codeUnit = str.charCodeAt(i);
+ GROWABLE_HEAP_I16()[(outPtr >>> 1) >>> 0] = codeUnit;
+ outPtr += 2;
+ }
+ GROWABLE_HEAP_I16()[(outPtr >>> 1) >>> 0] = 0;
+ return outPtr - startPtr;
+ };
+
+ var lengthBytesUTF16 = (str) => str.length * 2;
+
+ var UTF32ToString = (ptr, maxBytesToRead) => {
+ var i = 0;
+ var str = "";
+ while (!(i >= maxBytesToRead / 4)) {
+ var utf32 = GROWABLE_HEAP_I32()[((ptr + i * 4) >>> 2) >>> 0];
+ if (utf32 == 0) break;
+ ++i;
+ if (utf32 >= 65536) {
+ var ch = utf32 - 65536;
+ str += String.fromCharCode(55296 | (ch >> 10), 56320 | (ch & 1023));
+ } else {
+ str += String.fromCharCode(utf32);
+ }
+ }
+ return str;
+ };
+
+ var stringToUTF32 = (str, outPtr, maxBytesToWrite) => {
+ outPtr >>>= 0;
+ if (maxBytesToWrite === undefined) {
+ maxBytesToWrite = 2147483647;
+ }
+ if (maxBytesToWrite < 4) return 0;
+ var startPtr = outPtr;
+ var endPtr = startPtr + maxBytesToWrite - 4;
+ for (var i = 0; i < str.length; ++i) {
+ var codeUnit = str.charCodeAt(i);
+ if (codeUnit >= 55296 && codeUnit <= 57343) {
+ var trailSurrogate = str.charCodeAt(++i);
+ codeUnit =
+ (65536 + ((codeUnit & 1023) << 10)) | (trailSurrogate & 1023);
+ }
+ GROWABLE_HEAP_I32()[(outPtr >>> 2) >>> 0] = codeUnit;
+ outPtr += 4;
+ if (outPtr + 4 > endPtr) break;
+ }
+ GROWABLE_HEAP_I32()[(outPtr >>> 2) >>> 0] = 0;
+ return outPtr - startPtr;
+ };
+
+ var lengthBytesUTF32 = (str) => {
+ var len = 0;
+ for (var i = 0; i < str.length; ++i) {
+ var codeUnit = str.charCodeAt(i);
+ if (codeUnit >= 55296 && codeUnit <= 57343) ++i;
+ len += 4;
+ }
+ return len;
+ };
+
+ var __embind_register_std_wstring = function (rawType, charSize, name) {
+ rawType >>>= 0;
+ charSize >>>= 0;
+ name >>>= 0;
+ name = readLatin1String(name);
+ var decodeString, encodeString, getHeap, lengthBytesUTF, shift;
+ if (charSize === 2) {
+ decodeString = UTF16ToString;
+ encodeString = stringToUTF16;
+ lengthBytesUTF = lengthBytesUTF16;
+ getHeap = () => GROWABLE_HEAP_U16();
+ shift = 1;
+ } else if (charSize === 4) {
+ decodeString = UTF32ToString;
+ encodeString = stringToUTF32;
+ lengthBytesUTF = lengthBytesUTF32;
+ getHeap = () => GROWABLE_HEAP_U32();
+ shift = 2;
+ }
+ registerType(rawType, {
+ name: name,
+ fromWireType: (value) => {
+ var length = GROWABLE_HEAP_U32()[(value >>> 2) >>> 0];
+ var HEAP = getHeap();
+ var str;
+ var decodeStartPtr = value + 4;
+ for (var i = 0; i <= length; ++i) {
+ var currentBytePtr = value + 4 + i * charSize;
+ if (i == length || HEAP[currentBytePtr >>> shift] == 0) {
+ var maxReadBytes = currentBytePtr - decodeStartPtr;
+ var stringSegment = decodeString(decodeStartPtr, maxReadBytes);
+ if (str === undefined) {
+ str = stringSegment;
+ } else {
+ str += String.fromCharCode(0);
+ str += stringSegment;
+ }
+ decodeStartPtr = currentBytePtr + charSize;
+ }
+ }
+ _free(value);
+ return str;
+ },
+ toWireType: (destructors, value) => {
+ if (!(typeof value == "string")) {
+ throwBindingError(
+ `Cannot pass non-string to C++ string type ${name}`,
+ );
+ }
+ var length = lengthBytesUTF(value);
+ var ptr = _malloc(4 + length + charSize);
+ GROWABLE_HEAP_U32()[ptr >>> 2] = length >> shift;
+ encodeString(value, ptr + 4, length + charSize);
+ if (destructors !== null) {
+ destructors.push(_free, ptr);
+ }
+ return ptr;
+ },
+ argPackAdvance: GenericWireTypeSize,
+ readValueFromPointer: simpleReadValueFromPointer,
+ destructorFunction(ptr) {
+ _free(ptr);
+ },
+ });
+ };
+
+ var __embind_register_void = function (rawType, name) {
+ rawType >>>= 0;
+ name >>>= 0;
+ name = readLatin1String(name);
+ registerType(rawType, {
+ isVoid: true,
+ name: name,
+ argPackAdvance: 0,
+ fromWireType: () => undefined,
+ toWireType: (destructors, o) => undefined,
+ });
+ };
+
+ var nowIsMonotonic = true;
+
+ var __emscripten_get_now_is_monotonic = () => nowIsMonotonic;
+
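+ // Pthread mailbox support: the thread waits asynchronously on its mailbox word (via Atomics.waitAsync when available) and runs checkMailbox when it is notified.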
+ function __emscripten_thread_mailbox_await(pthread_ptr) {
+ pthread_ptr >>>= 0;
+ if (typeof Atomics.waitAsync === "function") {
+ var wait = Atomics.waitAsync(
+ GROWABLE_HEAP_I32(),
+ pthread_ptr >>> 2,
+ pthread_ptr,
+ );
+ wait.value.then(checkMailbox);
+ var waitingAsync = pthread_ptr + 128;
+ Atomics.store(GROWABLE_HEAP_I32(), waitingAsync >>> 2, 1);
+ }
+ }
+
+ Module["__emscripten_thread_mailbox_await"] =
+ __emscripten_thread_mailbox_await;
+
+ var checkMailbox = () => {
+ var pthread_ptr = _pthread_self();
+ if (pthread_ptr) {
+ __emscripten_thread_mailbox_await(pthread_ptr);
+ callUserCallback(() => __emscripten_check_mailbox());
+ }
+ };
+
+ Module["checkMailbox"] = checkMailbox;
+
+ var __emscripten_notify_mailbox_postmessage = function (
+ targetThreadId,
+ currThreadId,
+ mainThreadId,
+ ) {
+ targetThreadId >>>= 0;
+ currThreadId >>>= 0;
+ mainThreadId >>>= 0;
+ if (targetThreadId == currThreadId) {
+ setTimeout(() => checkMailbox());
+ } else if (ENVIRONMENT_IS_PTHREAD) {
+ postMessage({
+ targetThread: targetThreadId,
+ cmd: "checkMailbox",
+ });
+ } else {
+ var worker = PThread.pthreads[targetThreadId];
+ if (!worker) {
+ return;
+ }
+ worker.postMessage({
+ cmd: "checkMailbox",
+ });
+ }
+ };
+
+ var withStackSave = (f) => {
+ var stack = stackSave();
+ var ret = f();
+ stackRestore(stack);
+ return ret;
+ };
+
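+ // Proxies a call to the main thread: numeric arguments are copied into a stack-allocated f64 buffer and dispatched via __emscripten_run_on_main_thread_js (synchronously when sync is set).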
+ /** @type{function(number, (number|boolean), ...(number|boolean))} */ var proxyToMainThread =
+ function (index, sync) {
+ var numCallArgs = arguments.length - 2;
+ var outerArgs = arguments;
+ return withStackSave(() => {
+ var serializedNumCallArgs = numCallArgs;
+ var args = stackAlloc(serializedNumCallArgs * 8);
+ var b = args >>> 3;
+ for (var i = 0; i < numCallArgs; i++) {
+ var arg = outerArgs[2 + i];
+ GROWABLE_HEAP_F64()[(b + i) >>> 0] = arg;
+ }
+ return __emscripten_run_on_main_thread_js(
+ index,
+ serializedNumCallArgs,
+ args,
+ sync,
+ );
+ });
+ };
+
+ var proxiedJSCallArgs = [];
+
+ function __emscripten_receive_on_main_thread_js(
+ index,
+ callingThread,
+ numCallArgs,
+ args,
+ ) {
+ callingThread >>>= 0;
+ args >>>= 0;
+ proxiedJSCallArgs.length = numCallArgs;
+ var b = args >>> 3;
+ for (var i = 0; i < numCallArgs; i++) {
+ proxiedJSCallArgs[i] = GROWABLE_HEAP_F64()[(b + i) >>> 0];
+ }
+ var func = proxiedFunctionTable[index];
+ PThread.currentProxiedOperationCallerThread = callingThread;
+ var rtn = func.apply(null, proxiedJSCallArgs);
+ PThread.currentProxiedOperationCallerThread = 0;
+ return rtn;
+ }
+
+ function __emscripten_runtime_keepalive_clear() {
+ if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(20, 1);
+ noExitRuntime = false;
+ runtimeKeepaliveCounter = 0;
+ }
+
+ function __emscripten_thread_set_strongref(thread) {
+ thread >>>= 0;
+ if (ENVIRONMENT_IS_NODE) {
+ PThread.pthreads[thread].ref();
+ }
+ }
+
+ function __emval_as(handle, returnType, destructorsRef) {
+ handle >>>= 0;
+ returnType >>>= 0;
+ destructorsRef >>>= 0;
+ handle = Emval.toValue(handle);
+ returnType = requireRegisteredType(returnType, "emval::as");
+ var destructors = [];
+ var rd = Emval.toHandle(destructors);
+ GROWABLE_HEAP_U32()[(destructorsRef >>> 2) >>> 0] = rd;
+ return returnType["toWireType"](destructors, handle);
+ }
+
+ function __emval_as_int64(handle, returnType) {
+ handle >>>= 0;
+ returnType >>>= 0;
+ handle = Emval.toValue(handle);
+ returnType = requireRegisteredType(returnType, "emval::as");
+ return returnType["toWireType"](null, handle);
+ }
+
+ function __emval_as_uint64(handle, returnType) {
+ handle >>>= 0;
+ returnType >>>= 0;
+ handle = Emval.toValue(handle);
+ returnType = requireRegisteredType(returnType, "emval::as");
+ return returnType["toWireType"](null, handle);
+ }
+
+ var emval_lookupTypes = (argCount, argTypes) => {
+ var a = new Array(argCount);
+ for (var i = 0; i < argCount; ++i) {
+ a[i] = requireRegisteredType(
+ GROWABLE_HEAP_U32()[((argTypes + i * 4) >>> 2) >>> 0],
+ "parameter " + i,
+ );
+ }
+ return a;
+ };
+
+ function __emval_call(handle, argCount, argTypes, argv) {
+ handle >>>= 0;
+ argTypes >>>= 0;
+ argv >>>= 0;
+ handle = Emval.toValue(handle);
+ var types = emval_lookupTypes(argCount, argTypes);
+ var args = new Array(argCount);
+ for (var i = 0; i < argCount; ++i) {
+ var type = types[i];
+ args[i] = type["readValueFromPointer"](argv);
+ argv += type["argPackAdvance"];
+ }
+ var rv = handle.apply(undefined, args);
+ return Emval.toHandle(rv);
+ }
+
+ var emval_symbols = {};
+
+ var getStringOrSymbol = (address) => {
+ var symbol = emval_symbols[address];
+ if (symbol === undefined) {
+ return readLatin1String(address);
+ }
+ return symbol;
+ };
+
+ var emval_methodCallers = [];
+
+ function __emval_call_method(
+ caller,
+ handle,
+ methodName,
+ destructorsRef,
+ args,
+ ) {
+ caller >>>= 0;
+ handle >>>= 0;
+ methodName >>>= 0;
+ destructorsRef >>>= 0;
+ args >>>= 0;
+ caller = emval_methodCallers[caller];
+ handle = Emval.toValue(handle);
+ methodName = getStringOrSymbol(methodName);
+ var destructors = [];
+ var result = caller(handle, methodName, destructors, args);
+ if (destructors.length) {
+ GROWABLE_HEAP_U32()[(destructorsRef >>> 2) >>> 0] =
+ Emval.toHandle(destructors);
+ }
+ return result;
+ }
+
+ function __emval_equals(first, second) {
+ first >>>= 0;
+ second >>>= 0;
+ first = Emval.toValue(first);
+ second = Emval.toValue(second);
+ return first == second;
+ }
+
+ var emval_get_global = () => {
+ if (typeof globalThis == "object") {
+ return globalThis;
+ }
+ return (function () {
+ return Function;
+ })()("return this")();
+ };
+
+ function __emval_get_global(name) {
+ name >>>= 0;
+ if (name === 0) {
+ return Emval.toHandle(emval_get_global());
+ } else {
+ name = getStringOrSymbol(name);
+ return Emval.toHandle(emval_get_global()[name]);
+ }
+ }
+
+ var emval_addMethodCaller = (caller) => {
+ var id = emval_methodCallers.length;
+ emval_methodCallers.push(caller);
+ return id;
+ };
+
+ function __emval_get_method_caller(argCount, argTypes) {
+ argTypes >>>= 0;
+ var types = emval_lookupTypes(argCount, argTypes);
+ var retType = types.shift();
+ argCount--;
+ var params = ["retType"];
+ var args = [retType];
+ var argsList = "";
+ for (var i = 0; i < argCount; ++i) {
+ argsList += (i !== 0 ? ", " : "") + "arg" + i;
+ params.push("argType" + i);
+ args.push(types[i]);
+ }
+ var signatureName =
+ retType.name + "_$" + types.map((t) => t.name).join("_") + "$";
+ var functionName = makeLegalFunctionName("methodCaller_" + signatureName);
+ var functionBody =
+ "return function " +
+ functionName +
+ "(handle, name, destructors, args) {\n";
+ var offset = 0;
+ for (var i = 0; i < argCount; ++i) {
+ functionBody +=
+ " var arg" +
+ i +
+ " = argType" +
+ i +
+ ".readValueFromPointer(args" +
+ (offset ? "+" + offset : "") +
+ ");\n";
+ offset += types[i]["argPackAdvance"];
+ }
+ functionBody += " var rv = handle[name](" + argsList + ");\n";
+ for (var i = 0; i < argCount; ++i) {
+ if (types[i]["deleteObject"]) {
+ functionBody += " argType" + i + ".deleteObject(arg" + i + ");\n";
+ }
+ }
+ if (!retType.isVoid) {
+ functionBody += " return retType.toWireType(destructors, rv);\n";
+ }
+ functionBody += "};\n";
+ params.push(functionBody);
+ var invokerFunction = newFunc(Function, params).apply(null, args);
+ return emval_addMethodCaller(invokerFunction);
+ }
+
+ function __emval_get_property(handle, key) {
+ handle >>>= 0;
+ key >>>= 0;
+ handle = Emval.toValue(handle);
+ key = Emval.toValue(key);
+ return Emval.toHandle(handle[key]);
+ }
+
+ function __emval_incref(handle) {
+ handle >>>= 0;
+ if (handle > 4) {
+ emval_handles.get(handle).refcount += 1;
+ }
+ }
+
+ function __emval_instanceof(object, constructor) {
+ object >>>= 0;
+ constructor >>>= 0;
+ object = Emval.toValue(object);
+ constructor = Emval.toValue(constructor);
+ return object instanceof constructor;
+ }
+
+ function __emval_new_array() {
+ return Emval.toHandle([]);
+ }
+
+ function __emval_new_cstring(v) {
+ v >>>= 0;
+ return Emval.toHandle(getStringOrSymbol(v));
+ }
+
+ function __emval_new_object() {
+ return Emval.toHandle({});
+ }
+
+ function __emval_run_destructors(handle) {
+ handle >>>= 0;
+ var destructors = Emval.toValue(handle);
+ runDestructors(destructors);
+ __emval_decref(handle);
+ }
+
+ function __emval_set_property(handle, key, value) {
+ handle >>>= 0;
+ key >>>= 0;
+ value >>>= 0;
+ handle = Emval.toValue(handle);
+ key = Emval.toValue(key);
+ value = Emval.toValue(value);
+ handle[key] = value;
+ }
+
+ function __emval_take_value(type, arg) {
+ type >>>= 0;
+ arg >>>= 0;
+ type = requireRegisteredType(type, "_emval_take_value");
+ var v = type["readValueFromPointer"](arg);
+ return Emval.toHandle(v);
+ }
+
+ function __emval_typeof(handle) {
+ handle >>>= 0;
+ handle = Emval.toValue(handle);
+ return Emval.toHandle(typeof handle);
+ }
+
+ function __mmap_js(
+ len,
+ prot,
+ flags,
+ fd,
+ offset_low,
+ offset_high,
+ allocated,
+ addr,
+ ) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(
+ 21,
+ 1,
+ len,
+ prot,
+ flags,
+ fd,
+ offset_low,
+ offset_high,
+ allocated,
+ addr,
+ );
+ len >>>= 0;
+ var offset = convertI32PairToI53Checked(offset_low, offset_high);
+ allocated >>>= 0;
+ addr >>>= 0;
+ try {
+ if (isNaN(offset)) return 61;
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ var res = FS.mmap(stream, len, offset, prot, flags);
+ var ptr = res.ptr;
+ GROWABLE_HEAP_I32()[(allocated >>> 2) >>> 0] = res.allocated;
+ GROWABLE_HEAP_U32()[(addr >>> 2) >>> 0] = ptr;
+ return 0;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ function __munmap_js(addr, len, prot, flags, fd, offset_low, offset_high) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(
+ 22,
+ 1,
+ addr,
+ len,
+ prot,
+ flags,
+ fd,
+ offset_low,
+ offset_high,
+ );
+ addr >>>= 0;
+ len >>>= 0;
+ var offset = convertI32PairToI53Checked(offset_low, offset_high);
+ try {
+ if (isNaN(offset)) return 61;
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ if (prot & 2) {
+ SYSCALLS.doMsync(addr, stream, len, flags, offset);
+ }
+ FS.munmap(stream);
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return -e.errno;
+ }
+ }
+
+ var timers = {};
+
+ var _emscripten_get_now;
+
+ _emscripten_get_now = () => performance.timeOrigin + performance.now();
+
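+ // setitimer() emulation: keeps one pending setTimeout per timer kind and fires __emscripten_timeout through callUserCallback when it elapses.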
+ function __setitimer_js(which, timeout_ms) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(23, 1, which, timeout_ms);
+ if (timers[which]) {
+ clearTimeout(timers[which].id);
+ delete timers[which];
+ }
+ if (!timeout_ms) return 0;
+ var id = setTimeout(() => {
+ delete timers[which];
+ callUserCallback(() =>
+ __emscripten_timeout(which, _emscripten_get_now()),
+ );
+ }, timeout_ms);
+ timers[which] = {
+ id: id,
+ timeout_ms: timeout_ms,
+ };
+ return 0;
+ }
+
+ var _abort = () => {
+ abort("");
+ };
+
+ var warnOnce = (text) => {
+ if (!warnOnce.shown) warnOnce.shown = {};
+ if (!warnOnce.shown[text]) {
+ warnOnce.shown[text] = 1;
+ if (ENVIRONMENT_IS_NODE) text = "warning: " + text;
+ err(text);
+ }
+ };
+
+ var _emscripten_check_blocking_allowed = () => {};
+
+ var _emscripten_date_now = () => Date.now();
+
+ var _emscripten_exit_with_live_runtime = () => {
+ runtimeKeepalivePush();
+ throw "unwind";
+ };
+
+ function _emscripten_force_exit(status) {
+ if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(24, 1, status);
+ __emscripten_runtime_keepalive_clear();
+ _exit(status);
+ }
+
+ var getHeapMax = () => 4294901760;
+
+ function _emscripten_get_heap_max() {
+ return getHeapMax();
+ }
+
+ var _emscripten_num_logical_cores = () => {
+ if (ENVIRONMENT_IS_NODE) return require("os").cpus().length;
+ return navigator["hardwareConcurrency"];
+ };
+
+ var growMemory = (size) => {
+ var b = wasmMemory.buffer;
+ var pages = (size - b.byteLength + 65535) / 65536;
+ try {
+ wasmMemory.grow(pages);
+ updateMemoryViews();
+ return 1 /*success*/;
+ } catch (e) {}
+ };
+
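+ // Heap growth policy: over-allocate by up to ~20% (retrying with smaller over-growth), clamp to getHeapMax(), and round up to whole 64 KiB wasm pages.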
+ function _emscripten_resize_heap(requestedSize) {
+ requestedSize >>>= 0;
+ var oldSize = GROWABLE_HEAP_U8().length;
+ if (requestedSize <= oldSize) {
+ return false;
+ }
+ var maxHeapSize = getHeapMax();
+ if (requestedSize > maxHeapSize) {
+ return false;
+ }
+ var alignUp = (x, multiple) =>
+ x + ((multiple - (x % multiple)) % multiple);
+ for (var cutDown = 1; cutDown <= 4; cutDown *= 2) {
+ var overGrownHeapSize = oldSize * (1 + 0.2 / cutDown);
+ overGrownHeapSize = Math.min(
+ overGrownHeapSize,
+ requestedSize + 100663296,
+ );
+ var newSize = Math.min(
+ maxHeapSize,
+ alignUp(Math.max(requestedSize, overGrownHeapSize), 65536),
+ );
+ var replacement = growMemory(newSize);
+ if (replacement) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ var ENV = {};
+
+ var getExecutableName = () => thisProgram || "./this.program";
+
+ var getEnvStrings = () => {
+ if (!getEnvStrings.strings) {
+ var lang =
+ (
+ (typeof navigator == "object" &&
+ navigator.languages &&
+ navigator.languages[0]) ||
+ "C"
+ ).replace("-", "_") + ".UTF-8";
+ var env = {
+ USER: "web_user",
+ LOGNAME: "web_user",
+ PATH: "/",
+ PWD: "/",
+ HOME: "/home/web_user",
+ LANG: lang,
+ _: getExecutableName(),
+ };
+ for (var x in ENV) {
+ if (ENV[x] === undefined) delete env[x];
+ else env[x] = ENV[x];
+ }
+ var strings = [];
+ for (var x in env) {
+ strings.push(`${x}=${env[x]}`);
+ }
+ getEnvStrings.strings = strings;
+ }
+ return getEnvStrings.strings;
+ };
+
+ var stringToAscii = (str, buffer) => {
+ for (var i = 0; i < str.length; ++i) {
+ GROWABLE_HEAP_I8()[(buffer++ >>> 0) >>> 0] = str.charCodeAt(i);
+ }
+ GROWABLE_HEAP_I8()[(buffer >>> 0) >>> 0] = 0;
+ };
+
+ var _environ_get = function (__environ, environ_buf) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(25, 1, __environ, environ_buf);
+ __environ >>>= 0;
+ environ_buf >>>= 0;
+ var bufSize = 0;
+ getEnvStrings().forEach((string, i) => {
+ var ptr = environ_buf + bufSize;
+ GROWABLE_HEAP_U32()[((__environ + i * 4) >>> 2) >>> 0] = ptr;
+ stringToAscii(string, ptr);
+ bufSize += string.length + 1;
+ });
+ return 0;
+ };
+
+ var _environ_sizes_get = function (penviron_count, penviron_buf_size) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(26, 1, penviron_count, penviron_buf_size);
+ penviron_count >>>= 0;
+ penviron_buf_size >>>= 0;
+ var strings = getEnvStrings();
+ GROWABLE_HEAP_U32()[(penviron_count >>> 2) >>> 0] = strings.length;
+ var bufSize = 0;
+ strings.forEach((string) => (bufSize += string.length + 1));
+ GROWABLE_HEAP_U32()[(penviron_buf_size >>> 2) >>> 0] = bufSize;
+ return 0;
+ };
+
+ function _fd_close(fd) {
+ if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(27, 1, fd);
+ try {
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ FS.close(stream);
+ return 0;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return e.errno;
+ }
+ }
+
+ function _fd_fdstat_get(fd, pbuf) {
+ if (ENVIRONMENT_IS_PTHREAD) return proxyToMainThread(28, 1, fd, pbuf);
+ pbuf >>>= 0;
+ try {
+ var rightsBase = 0;
+ var rightsInheriting = 0;
+ var flags = 0;
+ {
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ var type = stream.tty
+ ? 2
+ : FS.isDir(stream.mode)
+ ? 3
+ : FS.isLink(stream.mode)
+ ? 7
+ : 4;
+ }
+ GROWABLE_HEAP_I8()[(pbuf >>> 0) >>> 0] = type;
+ GROWABLE_HEAP_I16()[((pbuf + 2) >>> 1) >>> 0] = flags;
+ (tempI64 = [
+ rightsBase >>> 0,
+ ((tempDouble = rightsBase),
+ +Math.abs(tempDouble) >= 1
+ ? tempDouble > 0
+ ? +Math.floor(tempDouble / 4294967296) >>> 0
+ : ~~+Math.ceil(
+ (tempDouble - +(~~tempDouble >>> 0)) / 4294967296,
+ ) >>> 0
+ : 0),
+ ]),
+ (GROWABLE_HEAP_I32()[((pbuf + 8) >>> 2) >>> 0] = tempI64[0]),
+ (GROWABLE_HEAP_I32()[((pbuf + 12) >>> 2) >>> 0] = tempI64[1]);
+ (tempI64 = [
+ rightsInheriting >>> 0,
+ ((tempDouble = rightsInheriting),
+ +Math.abs(tempDouble) >= 1
+ ? tempDouble > 0
+ ? +Math.floor(tempDouble / 4294967296) >>> 0
+ : ~~+Math.ceil(
+ (tempDouble - +(~~tempDouble >>> 0)) / 4294967296,
+ ) >>> 0
+ : 0),
+ ]),
+ (GROWABLE_HEAP_I32()[((pbuf + 16) >>> 2) >>> 0] = tempI64[0]),
+ (GROWABLE_HEAP_I32()[((pbuf + 20) >>> 2) >>> 0] = tempI64[1]);
+ return 0;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return e.errno;
+ }
+ }
+
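+ // Scatter-gather read: walks the iovec array (pointer/length pairs) and reads into each buffer via FS.read, returning the total byte count.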
+ /** @param {number=} offset */ var doReadv = (
+ stream,
+ iov,
+ iovcnt,
+ offset,
+ ) => {
+ var ret = 0;
+ for (var i = 0; i < iovcnt; i++) {
+ var ptr = GROWABLE_HEAP_U32()[(iov >>> 2) >>> 0];
+ var len = GROWABLE_HEAP_U32()[((iov + 4) >>> 2) >>> 0];
+ iov += 8;
+ var curr = FS.read(stream, GROWABLE_HEAP_I8(), ptr, len, offset);
+ if (curr < 0) return -1;
+ ret += curr;
+ if (curr < len) break;
+ if (typeof offset !== "undefined") {
+ offset += curr;
+ }
+ }
+ return ret;
+ };
+
+ function _fd_pread(fd, iov, iovcnt, offset_low, offset_high, pnum) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(
+ 29,
+ 1,
+ fd,
+ iov,
+ iovcnt,
+ offset_low,
+ offset_high,
+ pnum,
+ );
+ iov >>>= 0;
+ iovcnt >>>= 0;
+ var offset = convertI32PairToI53Checked(offset_low, offset_high);
+ pnum >>>= 0;
+ try {
+ if (isNaN(offset)) return 61;
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ var num = doReadv(stream, iov, iovcnt, offset);
+ GROWABLE_HEAP_U32()[(pnum >>> 2) >>> 0] = num;
+ return 0;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return e.errno;
+ }
+ }
+
+ /** @param {number=} offset */ var doWritev = (
+ stream,
+ iov,
+ iovcnt,
+ offset,
+ ) => {
+ var ret = 0;
+ for (var i = 0; i < iovcnt; i++) {
+ var ptr = GROWABLE_HEAP_U32()[(iov >>> 2) >>> 0];
+ var len = GROWABLE_HEAP_U32()[((iov + 4) >>> 2) >>> 0];
+ iov += 8;
+ var curr = FS.write(stream, GROWABLE_HEAP_I8(), ptr, len, offset);
+ if (curr < 0) return -1;
+ ret += curr;
+ if (typeof offset !== "undefined") {
+ offset += curr;
+ }
+ }
+ return ret;
+ };
+
+ function _fd_pwrite(fd, iov, iovcnt, offset_low, offset_high, pnum) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(
+ 30,
+ 1,
+ fd,
+ iov,
+ iovcnt,
+ offset_low,
+ offset_high,
+ pnum,
+ );
+ iov >>>= 0;
+ iovcnt >>>= 0;
+ var offset = convertI32PairToI53Checked(offset_low, offset_high);
+ pnum >>>= 0;
+ try {
+ if (isNaN(offset)) return 61;
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ var num = doWritev(stream, iov, iovcnt, offset);
+ GROWABLE_HEAP_U32()[(pnum >>> 2) >>> 0] = num;
+ return 0;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return e.errno;
+ }
+ }
+
+ function _fd_read(fd, iov, iovcnt, pnum) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(31, 1, fd, iov, iovcnt, pnum);
+ iov >>>= 0;
+ iovcnt >>>= 0;
+ pnum >>>= 0;
+ try {
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ var num = doReadv(stream, iov, iovcnt);
+ GROWABLE_HEAP_U32()[(pnum >>> 2) >>> 0] = num;
+ return 0;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return e.errno;
+ }
+ }
+
+ function _fd_seek(fd, offset_low, offset_high, whence, newOffset) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(
+ 32,
+ 1,
+ fd,
+ offset_low,
+ offset_high,
+ whence,
+ newOffset,
+ );
+ var offset = convertI32PairToI53Checked(offset_low, offset_high);
+ newOffset >>>= 0;
+ try {
+ if (isNaN(offset)) return 61;
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ FS.llseek(stream, offset, whence);
+ (tempI64 = [
+ stream.position >>> 0,
+ ((tempDouble = stream.position),
+ +Math.abs(tempDouble) >= 1
+ ? tempDouble > 0
+ ? +Math.floor(tempDouble / 4294967296) >>> 0
+ : ~~+Math.ceil(
+ (tempDouble - +(~~tempDouble >>> 0)) / 4294967296,
+ ) >>> 0
+ : 0),
+ ]),
+ (GROWABLE_HEAP_I32()[(newOffset >>> 2) >>> 0] = tempI64[0]),
+ (GROWABLE_HEAP_I32()[((newOffset + 4) >>> 2) >>> 0] = tempI64[1]);
+ if (stream.getdents && offset === 0 && whence === 0)
+ stream.getdents = null;
+ return 0;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return e.errno;
+ }
+ }
+
+ function _fd_write(fd, iov, iovcnt, pnum) {
+ if (ENVIRONMENT_IS_PTHREAD)
+ return proxyToMainThread(33, 1, fd, iov, iovcnt, pnum);
+ iov >>>= 0;
+ iovcnt >>>= 0;
+ pnum >>>= 0;
+ try {
+ var stream = SYSCALLS.getStreamFromFD(fd);
+ var num = doWritev(stream, iov, iovcnt);
+ GROWABLE_HEAP_U32()[(pnum >>> 2) >>> 0] = num;
+ return 0;
+ } catch (e) {
+ if (typeof FS == "undefined" || !(e.name === "ErrnoError")) throw e;
+ return e.errno;
+ }
+ }
+
+ function _llvm_eh_typeid_for(type) {
+ type >>>= 0;
+ return type;
+ }
+
+ var isLeapYear = (year) =>
+ year % 4 === 0 && (year % 100 !== 0 || year % 400 === 0);
+
+ var arraySum = (array, index) => {
+ var sum = 0;
+ for (var i = 0; i <= index; sum += array[i++]) {}
+ return sum;
+ };
+
+ var MONTH_DAYS_LEAP = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31];
+
+ var MONTH_DAYS_REGULAR = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31];
+
+ var addDays = (date, days) => {
+ var newDate = new Date(date.getTime());
+ while (days > 0) {
+ var leap = isLeapYear(newDate.getFullYear());
+ var currentMonth = newDate.getMonth();
+ var daysInCurrentMonth = (leap ? MONTH_DAYS_LEAP : MONTH_DAYS_REGULAR)[
+ currentMonth
+ ];
+ if (days > daysInCurrentMonth - newDate.getDate()) {
+ days -= daysInCurrentMonth - newDate.getDate() + 1;
+ newDate.setDate(1);
+ if (currentMonth < 11) {
+ newDate.setMonth(currentMonth + 1);
+ } else {
+ newDate.setMonth(0);
+ newDate.setFullYear(newDate.getFullYear() + 1);
+ }
+ } else {
+ newDate.setDate(newDate.getDate() + days);
+ return newDate;
+ }
+ }
+ return newDate;
+ };
+
+ var writeArrayToMemory = (array, buffer) => {
+ GROWABLE_HEAP_I8().set(array, buffer >>> 0);
+ };
+
+ function _strftime(s, maxsize, format, tm) {
+ s >>>= 0;
+ maxsize >>>= 0;
+ format >>>= 0;
+ tm >>>= 0;
+ var tm_zone = GROWABLE_HEAP_U32()[((tm + 40) >>> 2) >>> 0];
+ var date = {
+ tm_sec: GROWABLE_HEAP_I32()[(tm >>> 2) >>> 0],
+ tm_min: GROWABLE_HEAP_I32()[((tm + 4) >>> 2) >>> 0],
+ tm_hour: GROWABLE_HEAP_I32()[((tm + 8) >>> 2) >>> 0],
+ tm_mday: GROWABLE_HEAP_I32()[((tm + 12) >>> 2) >>> 0],
+ tm_mon: GROWABLE_HEAP_I32()[((tm + 16) >>> 2) >>> 0],
+ tm_year: GROWABLE_HEAP_I32()[((tm + 20) >>> 2) >>> 0],
+ tm_wday: GROWABLE_HEAP_I32()[((tm + 24) >>> 2) >>> 0],
+ tm_yday: GROWABLE_HEAP_I32()[((tm + 28) >>> 2) >>> 0],
+ tm_isdst: GROWABLE_HEAP_I32()[((tm + 32) >>> 2) >>> 0],
+ tm_gmtoff: GROWABLE_HEAP_I32()[((tm + 36) >>> 2) >>> 0],
+ tm_zone: tm_zone ? UTF8ToString(tm_zone) : "",
+ };
+ var pattern = UTF8ToString(format);
+ var EXPANSION_RULES_1 = {
+ "%c": "%a %b %d %H:%M:%S %Y",
+ "%D": "%m/%d/%y",
+ "%F": "%Y-%m-%d",
+ "%h": "%b",
+ "%r": "%I:%M:%S %p",
+ "%R": "%H:%M",
+ "%T": "%H:%M:%S",
+ "%x": "%m/%d/%y",
+ "%X": "%H:%M:%S",
+ "%Ec": "%c",
+ "%EC": "%C",
+ "%Ex": "%m/%d/%y",
+ "%EX": "%H:%M:%S",
+ "%Ey": "%y",
+ "%EY": "%Y",
+ "%Od": "%d",
+ "%Oe": "%e",
+ "%OH": "%H",
+ "%OI": "%I",
+ "%Om": "%m",
+ "%OM": "%M",
+ "%OS": "%S",
+ "%Ou": "%u",
+ "%OU": "%U",
+ "%OV": "%V",
+ "%Ow": "%w",
+ "%OW": "%W",
+ "%Oy": "%y",
+ };
+ for (var rule in EXPANSION_RULES_1) {
+ pattern = pattern.replace(
+ new RegExp(rule, "g"),
+ EXPANSION_RULES_1[rule],
+ );
+ }
+ var WEEKDAYS = [
+ "Sunday",
+ "Monday",
+ "Tuesday",
+ "Wednesday",
+ "Thursday",
+ "Friday",
+ "Saturday",
+ ];
+ var MONTHS = [
+ "January",
+ "February",
+ "March",
+ "April",
+ "May",
+ "June",
+ "July",
+ "August",
+ "September",
+ "October",
+ "November",
+ "December",
+ ];
+ function leadingSomething(value, digits, character) {
+ var str = typeof value == "number" ? value.toString() : value || "";
+ while (str.length < digits) {
+ str = character[0] + str;
+ }
+ return str;
+ }
+ function leadingNulls(value, digits) {
+ return leadingSomething(value, digits, "0");
+ }
+ function compareByDay(date1, date2) {
+ function sgn(value) {
+ return value < 0 ? -1 : value > 0 ? 1 : 0;
+ }
+ var compare;
+ if ((compare = sgn(date1.getFullYear() - date2.getFullYear())) === 0) {
+ if ((compare = sgn(date1.getMonth() - date2.getMonth())) === 0) {
+ compare = sgn(date1.getDate() - date2.getDate());
+ }
+ }
+ return compare;
+ }
+ function getFirstWeekStartDate(janFourth) {
+ switch (janFourth.getDay()) {
+ case 0:
+ return new Date(janFourth.getFullYear() - 1, 11, 29);
+
+ case 1:
+ return janFourth;
+
+ case 2:
+ return new Date(janFourth.getFullYear(), 0, 3);
+
+ case 3:
+ return new Date(janFourth.getFullYear(), 0, 2);
+
+ case 4:
+ return new Date(janFourth.getFullYear(), 0, 1);
+
+ case 5:
+ return new Date(janFourth.getFullYear() - 1, 11, 31);
+
+ case 6:
+ return new Date(janFourth.getFullYear() - 1, 11, 30);
+ }
+ }
+ function getWeekBasedYear(date) {
+ var thisDate = addDays(
+ new Date(date.tm_year + 1900, 0, 1),
+ date.tm_yday,
+ );
+ var janFourthThisYear = new Date(thisDate.getFullYear(), 0, 4);
+ var janFourthNextYear = new Date(thisDate.getFullYear() + 1, 0, 4);
+ var firstWeekStartThisYear = getFirstWeekStartDate(janFourthThisYear);
+ var firstWeekStartNextYear = getFirstWeekStartDate(janFourthNextYear);
+ if (compareByDay(firstWeekStartThisYear, thisDate) <= 0) {
+ if (compareByDay(firstWeekStartNextYear, thisDate) <= 0) {
+ return thisDate.getFullYear() + 1;
+ }
+ return thisDate.getFullYear();
+ }
+ return thisDate.getFullYear() - 1;
+ }
+ var EXPANSION_RULES_2 = {
+ "%a": (date) => WEEKDAYS[date.tm_wday].substring(0, 3),
+ "%A": (date) => WEEKDAYS[date.tm_wday],
+ "%b": (date) => MONTHS[date.tm_mon].substring(0, 3),
+ "%B": (date) => MONTHS[date.tm_mon],
+ "%C": (date) => {
+ var year = date.tm_year + 1900;
+ return leadingNulls((year / 100) | 0, 2);
+ },
+ "%d": (date) => leadingNulls(date.tm_mday, 2),
+ "%e": (date) => leadingSomething(date.tm_mday, 2, " "),
+ "%g": (date) => getWeekBasedYear(date).toString().substring(2),
+ "%G": (date) => getWeekBasedYear(date),
+ "%H": (date) => leadingNulls(date.tm_hour, 2),
+ "%I": (date) => {
+ var twelveHour = date.tm_hour;
+ if (twelveHour == 0) twelveHour = 12;
+ else if (twelveHour > 12) twelveHour -= 12;
+ return leadingNulls(twelveHour, 2);
+ },
+ "%j": (date) =>
+ leadingNulls(
+ date.tm_mday +
+ arraySum(
+ isLeapYear(date.tm_year + 1900)
+ ? MONTH_DAYS_LEAP
+ : MONTH_DAYS_REGULAR,
+ date.tm_mon - 1,
+ ),
+ 3,
+ ),
+ "%m": (date) => leadingNulls(date.tm_mon + 1, 2),
+ "%M": (date) => leadingNulls(date.tm_min, 2),
+ "%n": () => "\n",
+ "%p": (date) => {
+ if (date.tm_hour >= 0 && date.tm_hour < 12) {
+ return "AM";
+ }
+ return "PM";
+ },
+ "%S": (date) => leadingNulls(date.tm_sec, 2),
+ "%t": () => "\t",
+ "%u": (date) => date.tm_wday || 7,
+ "%U": (date) => {
+ var days = date.tm_yday + 7 - date.tm_wday;
+ return leadingNulls(Math.floor(days / 7), 2);
+ },
+ "%V": (date) => {
+ var val = Math.floor(
+ (date.tm_yday + 7 - ((date.tm_wday + 6) % 7)) / 7,
+ );
+ if ((date.tm_wday + 371 - date.tm_yday - 2) % 7 <= 2) {
+ val++;
+ }
+ if (!val) {
+ val = 52;
+ var dec31 = (date.tm_wday + 7 - date.tm_yday - 1) % 7;
+ if (
+ dec31 == 4 ||
+ (dec31 == 5 && isLeapYear((date.tm_year % 400) - 1))
+ ) {
+ val++;
+ }
+ } else if (val == 53) {
+ var jan1 = (date.tm_wday + 371 - date.tm_yday) % 7;
+ if (jan1 != 4 && (jan1 != 3 || !isLeapYear(date.tm_year))) val = 1;
+ }
+ return leadingNulls(val, 2);
+ },
+ "%w": (date) => date.tm_wday,
+ "%W": (date) => {
+ var days = date.tm_yday + 7 - ((date.tm_wday + 6) % 7);
+ return leadingNulls(Math.floor(days / 7), 2);
+ },
+ "%y": (date) => (date.tm_year + 1900).toString().substring(2),
+ "%Y": (date) => date.tm_year + 1900,
+ "%z": (date) => {
+ var off = date.tm_gmtoff;
+ var ahead = off >= 0;
+ off = Math.abs(off) / 60;
+ off = (off / 60) * 100 + (off % 60);
+ return (ahead ? "+" : "-") + String("0000" + off).slice(-4);
+ },
+ "%Z": (date) => date.tm_zone,
+ "%%": () => "%",
+ };
+ pattern = pattern.replace(/%%/g, "\0\0");
+ for (var rule in EXPANSION_RULES_2) {
+ if (pattern.includes(rule)) {
+ pattern = pattern.replace(
+ new RegExp(rule, "g"),
+ EXPANSION_RULES_2[rule](date),
+ );
+ }
+ }
+ pattern = pattern.replace(/\0\0/g, "%");
+ var bytes = intArrayFromString(pattern, false);
+ if (bytes.length > maxsize) {
+ return 0;
+ }
+ writeArrayToMemory(bytes, s);
+ return bytes.length - 1;
+ }
+
+ function _strftime_l(s, maxsize, format, tm, loc) {
+ s >>>= 0;
+ maxsize >>>= 0;
+ format >>>= 0;
+ tm >>>= 0;
+ loc >>>= 0;
+ return _strftime(s, maxsize, format, tm);
+ }
+
+ var FS_unlink = (path) => FS.unlink(path);
+
+ PThread.init();
+
+ var FSNode = /** @constructor */ function (parent, name, mode, rdev) {
+ if (!parent) {
+ parent = this;
+ }
+ this.parent = parent;
+ this.mount = parent.mount;
+ this.mounted = null;
+ this.id = FS.nextInode++;
+ this.name = name;
+ this.mode = mode;
+ this.node_ops = {};
+ this.stream_ops = {};
+ this.rdev = rdev;
+ };
+
+ var readMode = 292 /*292*/ | 73 /*73*/;
+
+ var writeMode = 146 /*146*/;
+
+ Object.defineProperties(FSNode.prototype, {
+ read: {
+ get: /** @this{FSNode} */ function () {
+ return (this.mode & readMode) === readMode;
+ },
+ set: /** @this{FSNode} */ function (val) {
+ val ? (this.mode |= readMode) : (this.mode &= ~readMode);
+ },
+ },
+ write: {
+ get: /** @this{FSNode} */ function () {
+ return (this.mode & writeMode) === writeMode;
+ },
+ set: /** @this{FSNode} */ function (val) {
+ val ? (this.mode |= writeMode) : (this.mode &= ~writeMode);
+ },
+ },
+ isFolder: {
+ get: /** @this{FSNode} */ function () {
+ return FS.isDir(this.mode);
+ },
+ },
+ isDevice: {
+ get: /** @this{FSNode} */ function () {
+ return FS.isChrdev(this.mode);
+ },
+ },
+ });
+
+ FS.FSNode = FSNode;
+
+ FS.createPreloadedFile = FS_createPreloadedFile;
+
+ FS.staticInit();
+
+ Module["FS_createPath"] = FS.createPath;
+
+ Module["FS_createDataFile"] = FS.createDataFile;
+
+ Module["FS_createPreloadedFile"] = FS.createPreloadedFile;
+
+ Module["FS_unlink"] = FS.unlink;
+
+ Module["FS_createLazyFile"] = FS.createLazyFile;
+
+ Module["FS_createDevice"] = FS.createDevice;
+
+ embind_init_charCodes();
+
+ BindingError = Module["BindingError"] = class BindingError extends Error {
+ constructor(message) {
+ super(message);
+ this.name = "BindingError";
+ }
+ };
+
+ InternalError = Module["InternalError"] = class InternalError extends (
+ Error
+ ) {
+ constructor(message) {
+ super(message);
+ this.name = "InternalError";
+ }
+ };
+
+ init_ClassHandle();
+
+ init_embind();
+
+ init_RegisteredPointer();
+
+ UnboundTypeError = Module["UnboundTypeError"] = extendError(
+ Error,
+ "UnboundTypeError",
+ );
+
+ handleAllocatorInit();
+
+ init_emval();
+
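+ // Table of functions that worker threads invoke on the main thread via proxyToMainThread; the numeric index passed to proxyToMainThread selects an entry here.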
+ var proxiedFunctionTable = [
+ _proc_exit,
+ exitOnMainThread,
+ pthreadCreateProxied,
+ ___syscall_chmod,
+ ___syscall_faccessat,
+ ___syscall_fadvise64,
+ ___syscall_fchmod,
+ ___syscall_fcntl64,
+ ___syscall_fstat64,
+ ___syscall_getcwd,
+ ___syscall_getdents64,
+ ___syscall_ioctl,
+ ___syscall_lstat64,
+ ___syscall_mkdirat,
+ ___syscall_newfstatat,
+ ___syscall_openat,
+ ___syscall_readlinkat,
+ ___syscall_renameat,
+ ___syscall_stat64,
+ ___syscall_unlinkat,
+ __emscripten_runtime_keepalive_clear,
+ __mmap_js,
+ __munmap_js,
+ __setitimer_js,
+ _emscripten_force_exit,
+ _environ_get,
+ _environ_sizes_get,
+ _fd_close,
+ _fd_fdstat_get,
+ _fd_pread,
+ _fd_pwrite,
+ _fd_read,
+ _fd_seek,
+ _fd_write,
+ ];
+
+ var wasmImports = {
+ /** @export */ __assert_fail: ___assert_fail,
+ /** @export */ __asyncjs__fetch_asset: __asyncjs__fetch_asset,
+ /** @export */ __call_sighandler: ___call_sighandler,
+ /** @export */ __cxa_begin_catch: ___cxa_begin_catch,
+ /** @export */ __cxa_end_catch: ___cxa_end_catch,
+ /** @export */ __cxa_find_matching_catch_2: ___cxa_find_matching_catch_2,
+ /** @export */ __cxa_find_matching_catch_3: ___cxa_find_matching_catch_3,
+ /** @export */ __cxa_find_matching_catch_4: ___cxa_find_matching_catch_4,
+ /** @export */ __cxa_get_exception_ptr: ___cxa_get_exception_ptr,
+ /** @export */ __cxa_rethrow: ___cxa_rethrow,
+ /** @export */ __cxa_rethrow_primary_exception:
+ ___cxa_rethrow_primary_exception,
+ /** @export */ __cxa_throw: ___cxa_throw,
+ /** @export */ __cxa_uncaught_exceptions: ___cxa_uncaught_exceptions,
+ /** @export */ __emscripten_init_main_thread_js:
+ ___emscripten_init_main_thread_js,
+ /** @export */ __emscripten_thread_cleanup: ___emscripten_thread_cleanup,
+ /** @export */ __pthread_create_js: ___pthread_create_js,
+ /** @export */ __resumeException: ___resumeException,
+ /** @export */ __syscall_chmod: ___syscall_chmod,
+ /** @export */ __syscall_faccessat: ___syscall_faccessat,
+ /** @export */ __syscall_fadvise64: ___syscall_fadvise64,
+ /** @export */ __syscall_fchmod: ___syscall_fchmod,
+ /** @export */ __syscall_fcntl64: ___syscall_fcntl64,
+ /** @export */ __syscall_fstat64: ___syscall_fstat64,
+ /** @export */ __syscall_getcwd: ___syscall_getcwd,
+ /** @export */ __syscall_getdents64: ___syscall_getdents64,
+ /** @export */ __syscall_ioctl: ___syscall_ioctl,
+ /** @export */ __syscall_lstat64: ___syscall_lstat64,
+ /** @export */ __syscall_mkdirat: ___syscall_mkdirat,
+ /** @export */ __syscall_newfstatat: ___syscall_newfstatat,
+ /** @export */ __syscall_openat: ___syscall_openat,
+ /** @export */ __syscall_readlinkat: ___syscall_readlinkat,
+ /** @export */ __syscall_renameat: ___syscall_renameat,
+ /** @export */ __syscall_stat64: ___syscall_stat64,
+ /** @export */ __syscall_unlinkat: ___syscall_unlinkat,
+ /** @export */ _embind_register_bigint: __embind_register_bigint,
+ /** @export */ _embind_register_bool: __embind_register_bool,
+ /** @export */ _embind_register_class: __embind_register_class,
+ /** @export */ _embind_register_class_class_function:
+ __embind_register_class_class_function,
+ /** @export */ _embind_register_class_class_property:
+ __embind_register_class_class_property,
+ /** @export */ _embind_register_class_constructor:
+ __embind_register_class_constructor,
+ /** @export */ _embind_register_class_function:
+ __embind_register_class_function,
+ /** @export */ _embind_register_class_property:
+ __embind_register_class_property,
+ /** @export */ _embind_register_emval: __embind_register_emval,
+ /** @export */ _embind_register_enum: __embind_register_enum,
+ /** @export */ _embind_register_enum_value: __embind_register_enum_value,
+ /** @export */ _embind_register_float: __embind_register_float,
+ /** @export */ _embind_register_function: __embind_register_function,
+ /** @export */ _embind_register_integer: __embind_register_integer,
+ /** @export */ _embind_register_memory_view:
+ __embind_register_memory_view,
+ /** @export */ _embind_register_smart_ptr: __embind_register_smart_ptr,
+ /** @export */ _embind_register_std_string: __embind_register_std_string,
+ /** @export */ _embind_register_std_wstring:
+ __embind_register_std_wstring,
+ /** @export */ _embind_register_void: __embind_register_void,
+ /** @export */ _emscripten_get_now_is_monotonic:
+ __emscripten_get_now_is_monotonic,
+ /** @export */ _emscripten_notify_mailbox_postmessage:
+ __emscripten_notify_mailbox_postmessage,
+ /** @export */ _emscripten_receive_on_main_thread_js:
+ __emscripten_receive_on_main_thread_js,
+ /** @export */ _emscripten_runtime_keepalive_clear:
+ __emscripten_runtime_keepalive_clear,
+ /** @export */ _emscripten_thread_mailbox_await:
+ __emscripten_thread_mailbox_await,
+ /** @export */ _emscripten_thread_set_strongref:
+ __emscripten_thread_set_strongref,
+ /** @export */ _emval_as: __emval_as,
+ /** @export */ _emval_as_int64: __emval_as_int64,
+ /** @export */ _emval_as_uint64: __emval_as_uint64,
+ /** @export */ _emval_call: __emval_call,
+ /** @export */ _emval_call_method: __emval_call_method,
+ /** @export */ _emval_decref: __emval_decref,
+ /** @export */ _emval_equals: __emval_equals,
+ /** @export */ _emval_get_global: __emval_get_global,
+ /** @export */ _emval_get_method_caller: __emval_get_method_caller,
+ /** @export */ _emval_get_property: __emval_get_property,
+ /** @export */ _emval_incref: __emval_incref,
+ /** @export */ _emval_instanceof: __emval_instanceof,
+ /** @export */ _emval_new_array: __emval_new_array,
+ /** @export */ _emval_new_cstring: __emval_new_cstring,
+ /** @export */ _emval_new_object: __emval_new_object,
+ /** @export */ _emval_run_destructors: __emval_run_destructors,
+ /** @export */ _emval_set_property: __emval_set_property,
+ /** @export */ _emval_take_value: __emval_take_value,
+ /** @export */ _emval_typeof: __emval_typeof,
+ /** @export */ _mmap_js: __mmap_js,
+ /** @export */ _munmap_js: __munmap_js,
+ /** @export */ _setitimer_js: __setitimer_js,
+ /** @export */ abort: _abort,
+ /** @export */ addToLoadedFiles: addToLoadedFiles,
+ /** @export */ downloadJS: downloadJS,
+ /** @export */ emscripten_check_blocking_allowed:
+ _emscripten_check_blocking_allowed,
+ /** @export */ emscripten_date_now: _emscripten_date_now,
+ /** @export */ emscripten_exit_with_live_runtime:
+ _emscripten_exit_with_live_runtime,
+ /** @export */ emscripten_force_exit: _emscripten_force_exit,
+ /** @export */ emscripten_get_heap_max: _emscripten_get_heap_max,
+ /** @export */ emscripten_get_now: _emscripten_get_now,
+ /** @export */ emscripten_num_logical_cores:
+ _emscripten_num_logical_cores,
+ /** @export */ emscripten_resize_heap: _emscripten_resize_heap,
+ /** @export */ environ_get: _environ_get,
+ /** @export */ environ_sizes_get: _environ_sizes_get,
+ /** @export */ exit: _exit,
+ /** @export */ fd_close: _fd_close,
+ /** @export */ fd_fdstat_get: _fd_fdstat_get,
+ /** @export */ fd_pread: _fd_pread,
+ /** @export */ fd_pwrite: _fd_pwrite,
+ /** @export */ fd_read: _fd_read,
+ /** @export */ fd_seek: _fd_seek,
+ /** @export */ fd_write: _fd_write,
+ /** @export */ invoke_d: invoke_d,
+ /** @export */ invoke_dddd: invoke_dddd,
+ /** @export */ invoke_ddiiiii: invoke_ddiiiii,
+ /** @export */ invoke_di: invoke_di,
+ /** @export */ invoke_did: invoke_did,
+ /** @export */ invoke_didii: invoke_didii,
+ /** @export */ invoke_dif: invoke_dif,
+ /** @export */ invoke_dii: invoke_dii,
+ /** @export */ invoke_diii: invoke_diii,
+ /** @export */ invoke_diiiii: invoke_diiiii,
+ /** @export */ invoke_dj: invoke_dj,
+ /** @export */ invoke_fi: invoke_fi,
+ /** @export */ invoke_fid: invoke_fid,
+ /** @export */ invoke_fif: invoke_fif,
+ /** @export */ invoke_fii: invoke_fii,
+ /** @export */ invoke_fiii: invoke_fiii,
+ /** @export */ invoke_fiiid: invoke_fiiid,
+ /** @export */ invoke_i: invoke_i,
+ /** @export */ invoke_idddii: invoke_idddii,
+ /** @export */ invoke_idddiii: invoke_idddiii,
+ /** @export */ invoke_iddii: invoke_iddii,
+ /** @export */ invoke_iddiii: invoke_iddiii,
+ /** @export */ invoke_idii: invoke_idii,
+ /** @export */ invoke_idiiiiii: invoke_idiiiiii,
+ /** @export */ invoke_idiiiiiii: invoke_idiiiiiii,
+ /** @export */ invoke_ii: invoke_ii,
+ /** @export */ invoke_iid: invoke_iid,
+ /** @export */ invoke_iidd: invoke_iidd,
+ /** @export */ invoke_iidi: invoke_iidi,
+ /** @export */ invoke_iidii: invoke_iidii,
+ /** @export */ invoke_iidiii: invoke_iidiii,
+ /** @export */ invoke_iidiiii: invoke_iidiiii,
+ /** @export */ invoke_iidiiiii: invoke_iidiiiii,
+ /** @export */ invoke_iif: invoke_iif,
+ /** @export */ invoke_iiffi: invoke_iiffi,
+ /** @export */ invoke_iii: invoke_iii,
+ /** @export */ invoke_iiid: invoke_iiid,
+ /** @export */ invoke_iiidddii: invoke_iiidddii,
+ /** @export */ invoke_iiiddi: invoke_iiiddi,
+ /** @export */ invoke_iiiddii: invoke_iiiddii,
+ /** @export */ invoke_iiidi: invoke_iiidi,
+ /** @export */ invoke_iiidii: invoke_iiidii,
+ /** @export */ invoke_iiidiiidiiiidiif: invoke_iiidiiidiiiidiif,
+ /** @export */ invoke_iiidiiiii: invoke_iiidiiiii,
+ /** @export */ invoke_iiifi: invoke_iiifi,
+ /** @export */ invoke_iiii: invoke_iiii,
+ /** @export */ invoke_iiiid: invoke_iiiid,
+ /** @export */ invoke_iiiiddd: invoke_iiiiddd,
+ /** @export */ invoke_iiiidii: invoke_iiiidii,
+ /** @export */ invoke_iiiidiii: invoke_iiiidiii,
+ /** @export */ invoke_iiiidiiii: invoke_iiiidiiii,
+ /** @export */ invoke_iiiidiiiiii: invoke_iiiidiiiiii,
+ /** @export */ invoke_iiiifffffiiff: invoke_iiiifffffiiff,
+ /** @export */ invoke_iiiii: invoke_iiiii,
+ /** @export */ invoke_iiiiid: invoke_iiiiid,
+ /** @export */ invoke_iiiiidiiii: invoke_iiiiidiiii,
+ /** @export */ invoke_iiiiii: invoke_iiiiii,
+ /** @export */ invoke_iiiiiid: invoke_iiiiiid,
+ /** @export */ invoke_iiiiiii: invoke_iiiiiii,
+ /** @export */ invoke_iiiiiiidddi: invoke_iiiiiiidddi,
+ /** @export */ invoke_iiiiiiii: invoke_iiiiiiii,
+ /** @export */ invoke_iiiiiiiidi: invoke_iiiiiiiidi,
+ /** @export */ invoke_iiiiiiiii: invoke_iiiiiiiii,
+ /** @export */ invoke_iiiiiiiiii: invoke_iiiiiiiiii,
+ /** @export */ invoke_iiiiiiiiiii: invoke_iiiiiiiiiii,
+ /** @export */ invoke_iiiiiiiiiiii: invoke_iiiiiiiiiiii,
+ /** @export */ invoke_iiiiiiiiiiiii: invoke_iiiiiiiiiiiii,
+ /** @export */ invoke_iiiiiiiiiiiiii: invoke_iiiiiiiiiiiiii,
+ /** @export */ invoke_iiiiiiiiiiiiiiiiiiii: invoke_iiiiiiiiiiiiiiiiiiii,
+ /** @export */ invoke_iiiiij: invoke_iiiiij,
+ /** @export */ invoke_iiiijjii: invoke_iiiijjii,
+ /** @export */ invoke_iiijj: invoke_iiijj,
+ /** @export */ invoke_iij: invoke_iij,
+ /** @export */ invoke_ij: invoke_ij,
+ /** @export */ invoke_j: invoke_j,
+ /** @export */ invoke_ji: invoke_ji,
+ /** @export */ invoke_jii: invoke_jii,
+ /** @export */ invoke_jiii: invoke_jiii,
+ /** @export */ invoke_jiiii: invoke_jiiii,
+ /** @export */ invoke_jiiij: invoke_jiiij,
+ /** @export */ invoke_jiij: invoke_jiij,
+ /** @export */ invoke_jjiii: invoke_jjiii,
+ /** @export */ invoke_v: invoke_v,
+ /** @export */ invoke_vd: invoke_vd,
+ /** @export */ invoke_vdiii: invoke_vdiii,
+ /** @export */ invoke_vfffi: invoke_vfffi,
+ /** @export */ invoke_vfiiii: invoke_vfiiii,
+ /** @export */ invoke_vi: invoke_vi,
+ /** @export */ invoke_vid: invoke_vid,
+ /** @export */ invoke_viddi: invoke_viddi,
+ /** @export */ invoke_vidii: invoke_vidii,
+ /** @export */ invoke_vidiii: invoke_vidiii,
+ /** @export */ invoke_vidiiii: invoke_vidiiii,
+ /** @export */ invoke_vif: invoke_vif,
+ /** @export */ invoke_viffffi: invoke_viffffi,
+ /** @export */ invoke_vifffi: invoke_vifffi,
+ /** @export */ invoke_vifii: invoke_vifii,
+ /** @export */ invoke_vii: invoke_vii,
+ /** @export */ invoke_viid: invoke_viid,
+ /** @export */ invoke_viidi: invoke_viidi,
+ /** @export */ invoke_viidii: invoke_viidii,
+ /** @export */ invoke_viif: invoke_viif,
+ /** @export */ invoke_viiffffi: invoke_viiffffi,
+ /** @export */ invoke_viiffi: invoke_viiffi,
+ /** @export */ invoke_viii: invoke_viii,
+ /** @export */ invoke_viiid: invoke_viiid,
+ /** @export */ invoke_viiidi: invoke_viiidi,
+ /** @export */ invoke_viiidii: invoke_viiidii,
+ /** @export */ invoke_viiidiiiii: invoke_viiidiiiii,
+ /** @export */ invoke_viiif: invoke_viiif,
+ /** @export */ invoke_viiii: invoke_viiii,
+ /** @export */ invoke_viiiid: invoke_viiiid,
+ /** @export */ invoke_viiiidi: invoke_viiiidi,
+ /** @export */ invoke_viiiidiiiiii: invoke_viiiidiiiiii,
+ /** @export */ invoke_viiiii: invoke_viiiii,
+ /** @export */ invoke_viiiiid: invoke_viiiiid,
+ /** @export */ invoke_viiiiidi: invoke_viiiiidi,
+ /** @export */ invoke_viiiiii: invoke_viiiiii,
+ /** @export */ invoke_viiiiiii: invoke_viiiiiii,
+ /** @export */ invoke_viiiiiiid: invoke_viiiiiiid,
+ /** @export */ invoke_viiiiiiii: invoke_viiiiiiii,
+ /** @export */ invoke_viiiiiiiii: invoke_viiiiiiiii,
+ /** @export */ invoke_viiiiiiiiii: invoke_viiiiiiiiii,
+ /** @export */ invoke_viiiiiiiiiiii: invoke_viiiiiiiiiiii,
+ /** @export */ invoke_viiiiiiiiiiiii: invoke_viiiiiiiiiiiii,
+ /** @export */ invoke_viiiiiiiiiiiiiii: invoke_viiiiiiiiiiiiiii,
+ /** @export */ invoke_viiiijii: invoke_viiiijii,
+ /** @export */ invoke_viij: invoke_viij,
+ /** @export */ invoke_viiji: invoke_viiji,
+ /** @export */ invoke_vij: invoke_vij,
+ /** @export */ invoke_viji: invoke_viji,
+ /** @export */ invoke_vijii: invoke_vijii,
+ /** @export */ invoke_vijiiiii: invoke_vijiiiii,
+ /** @export */ invoke_vijji: invoke_vijji,
+ /** @export */ invoke_vjiii: invoke_vjiii,
+ /** @export */ invoke_vjiiii: invoke_vjiiii,
+ /** @export */ llvm_eh_typeid_for: _llvm_eh_typeid_for,
+ /** @export */ memory: wasmMemory || Module["wasmMemory"],
+ /** @export */ proc_exit: _proc_exit,
+ /** @export */ strftime_l: _strftime_l,
+ };
+
+ var wasmExports = createWasm();
+
+ var ___wasm_call_ctors = () =>
+ (___wasm_call_ctors = wasmExports["__wasm_call_ctors"])();
+
+ var _pthread_self = (Module["_pthread_self"] = () =>
+ (_pthread_self = Module["_pthread_self"] =
+ wasmExports["pthread_self"])());
+
+ var ___cxa_free_exception = (a0) =>
+ (___cxa_free_exception = wasmExports["__cxa_free_exception"])(a0);
+
+ var _free = (a0) => (_free = wasmExports["free"])(a0);
+
+ var _malloc = (a0) => (_malloc = wasmExports["malloc"])(a0);
+
+ var ___errno_location = () =>
+ (___errno_location = wasmExports["__errno_location"])();
+
+ var __emscripten_tls_init = (Module["__emscripten_tls_init"] = () =>
+ (__emscripten_tls_init = Module["__emscripten_tls_init"] =
+ wasmExports["_emscripten_tls_init"])());
+
+ var _emscripten_builtin_memalign = (a0, a1) =>
+ (_emscripten_builtin_memalign =
+ wasmExports["emscripten_builtin_memalign"])(a0, a1);
+
+ var ___getTypeName = (a0) =>
+ (___getTypeName = wasmExports["__getTypeName"])(a0);
+
+ var __embind_initialize_bindings = (Module["__embind_initialize_bindings"] =
+ () =>
+ (__embind_initialize_bindings = Module["__embind_initialize_bindings"] =
+ wasmExports["_embind_initialize_bindings"])());
+
+ var __emscripten_thread_init = (Module["__emscripten_thread_init"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ ) =>
+ (__emscripten_thread_init = Module["__emscripten_thread_init"] =
+ wasmExports["_emscripten_thread_init"])(a0, a1, a2, a3, a4, a5));
+
+ var __emscripten_thread_crashed = (Module["__emscripten_thread_crashed"] =
+ () =>
+ (__emscripten_thread_crashed = Module["__emscripten_thread_crashed"] =
+ wasmExports["_emscripten_thread_crashed"])());
+
+ var _emscripten_main_thread_process_queued_calls = () =>
+ (_emscripten_main_thread_process_queued_calls =
+ wasmExports["emscripten_main_thread_process_queued_calls"])();
+
+ var _emscripten_main_runtime_thread_id = () =>
+ (_emscripten_main_runtime_thread_id =
+ wasmExports["emscripten_main_runtime_thread_id"])();
+
+ var __emscripten_run_on_main_thread_js = (a0, a1, a2, a3) =>
+ (__emscripten_run_on_main_thread_js =
+ wasmExports["_emscripten_run_on_main_thread_js"])(a0, a1, a2, a3);
+
+ var __emscripten_thread_free_data = (a0) =>
+ (__emscripten_thread_free_data =
+ wasmExports["_emscripten_thread_free_data"])(a0);
+
+ var __emscripten_thread_exit = (Module["__emscripten_thread_exit"] = (a0) =>
+ (__emscripten_thread_exit = Module["__emscripten_thread_exit"] =
+ wasmExports["_emscripten_thread_exit"])(a0));
+
+ var __emscripten_timeout = (a0, a1) =>
+ (__emscripten_timeout = wasmExports["_emscripten_timeout"])(a0, a1);
+
+ var __emscripten_check_mailbox = (Module["__emscripten_check_mailbox"] =
+ () =>
+ (__emscripten_check_mailbox = Module["__emscripten_check_mailbox"] =
+ wasmExports["_emscripten_check_mailbox"])());
+
+ var _setThrew = (a0, a1) => (_setThrew = wasmExports["setThrew"])(a0, a1);
+
+ var setTempRet0 = (a0) => (setTempRet0 = wasmExports["setTempRet0"])(a0);
+
+ var _emscripten_stack_set_limits = (a0, a1) =>
+ (_emscripten_stack_set_limits =
+ wasmExports["emscripten_stack_set_limits"])(a0, a1);
+
+ var stackSave = () => (stackSave = wasmExports["stackSave"])();
+
+ var stackRestore = (a0) => (stackRestore = wasmExports["stackRestore"])(a0);
+
+ var stackAlloc = (a0) => (stackAlloc = wasmExports["stackAlloc"])(a0);
+
+ var ___cxa_decrement_exception_refcount = (a0) =>
+ (___cxa_decrement_exception_refcount =
+ wasmExports["__cxa_decrement_exception_refcount"])(a0);
+
+ var ___cxa_increment_exception_refcount = (a0) =>
+ (___cxa_increment_exception_refcount =
+ wasmExports["__cxa_increment_exception_refcount"])(a0);
+
+ var ___cxa_can_catch = (a0, a1, a2) =>
+ (___cxa_can_catch = wasmExports["__cxa_can_catch"])(a0, a1, a2);
+
+ var ___cxa_is_pointer_type = (a0) =>
+ (___cxa_is_pointer_type = wasmExports["__cxa_is_pointer_type"])(a0);
+
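+  // dynCall_* helpers invoke a function in the wasm indirect function table
+  // by index. The suffix encodes the signature: the first letter is the
+  // return type and the rest are the parameters (v = void, i = i32, j = i64,
+  // f = f32, d = f64).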
+ var dynCall_iii = (Module["dynCall_iii"] = (a0, a1, a2) =>
+ (dynCall_iii = Module["dynCall_iii"] = wasmExports["dynCall_iii"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_viiii = (Module["dynCall_viiii"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_viiii = Module["dynCall_viiii"] = wasmExports["dynCall_viiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_vi = (Module["dynCall_vi"] = (a0, a1) =>
+ (dynCall_vi = Module["dynCall_vi"] = wasmExports["dynCall_vi"])(a0, a1));
+
+ var dynCall_iiii = (Module["dynCall_iiii"] = (a0, a1, a2, a3) =>
+ (dynCall_iiii = Module["dynCall_iiii"] = wasmExports["dynCall_iiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ ));
+
+ var dynCall_ii = (Module["dynCall_ii"] = (a0, a1) =>
+ (dynCall_ii = Module["dynCall_ii"] = wasmExports["dynCall_ii"])(a0, a1));
+
+ var dynCall_iiiiii = (Module["dynCall_iiiiii"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_iiiiii = Module["dynCall_iiiiii"] =
+ wasmExports["dynCall_iiiiii"])(a0, a1, a2, a3, a4, a5));
+
+ var dynCall_vii = (Module["dynCall_vii"] = (a0, a1, a2) =>
+ (dynCall_vii = Module["dynCall_vii"] = wasmExports["dynCall_vii"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_i = (Module["dynCall_i"] = (a0) =>
+ (dynCall_i = Module["dynCall_i"] = wasmExports["dynCall_i"])(a0));
+
+ var dynCall_viiiii = (Module["dynCall_viiiii"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_viiiii = Module["dynCall_viiiii"] =
+ wasmExports["dynCall_viiiii"])(a0, a1, a2, a3, a4, a5));
+
+ var dynCall_viii = (Module["dynCall_viii"] = (a0, a1, a2, a3) =>
+ (dynCall_viii = Module["dynCall_viii"] = wasmExports["dynCall_viii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ ));
+
+ var dynCall_vid = (Module["dynCall_vid"] = (a0, a1, a2) =>
+ (dynCall_vid = Module["dynCall_vid"] = wasmExports["dynCall_vid"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_viid = (Module["dynCall_viid"] = (a0, a1, a2, a3) =>
+ (dynCall_viid = Module["dynCall_viid"] = wasmExports["dynCall_viid"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ ));
+
+ var dynCall_di = (Module["dynCall_di"] = (a0, a1) =>
+ (dynCall_di = Module["dynCall_di"] = wasmExports["dynCall_di"])(a0, a1));
+
+ var dynCall_dii = (Module["dynCall_dii"] = (a0, a1, a2) =>
+ (dynCall_dii = Module["dynCall_dii"] = wasmExports["dynCall_dii"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_v = (Module["dynCall_v"] = (a0) =>
+ (dynCall_v = Module["dynCall_v"] = wasmExports["dynCall_v"])(a0));
+
+ var dynCall_iiiii = (Module["dynCall_iiiii"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_iiiii = Module["dynCall_iiiii"] = wasmExports["dynCall_iiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_viiid = (Module["dynCall_viiid"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_viiid = Module["dynCall_viiid"] = wasmExports["dynCall_viiid"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_iiiid = (Module["dynCall_iiiid"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_iiiid = Module["dynCall_iiiid"] = wasmExports["dynCall_iiiid"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_iiiiiiii = (Module["dynCall_iiiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ ) =>
+ (dynCall_iiiiiiii = Module["dynCall_iiiiiiii"] =
+ wasmExports["dynCall_iiiiiiii"])(a0, a1, a2, a3, a4, a5, a6, a7));
+
+ var dynCall_viiiiii = (Module["dynCall_viiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_viiiiii = Module["dynCall_viiiiii"] =
+ wasmExports["dynCall_viiiiii"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_diiiii = (Module["dynCall_diiiii"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_diiiii = Module["dynCall_diiiii"] =
+ wasmExports["dynCall_diiiii"])(a0, a1, a2, a3, a4, a5));
+
+ var dynCall_iiiiiii = (Module["dynCall_iiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_iiiiiii = Module["dynCall_iiiiiii"] =
+ wasmExports["dynCall_iiiiiii"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_diii = (Module["dynCall_diii"] = (a0, a1, a2, a3) =>
+ (dynCall_diii = Module["dynCall_diii"] = wasmExports["dynCall_diii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ ));
+
+ var dynCall_d = (Module["dynCall_d"] = (a0) =>
+ (dynCall_d = Module["dynCall_d"] = wasmExports["dynCall_d"])(a0));
+
+ var dynCall_iiid = (Module["dynCall_iiid"] = (a0, a1, a2, a3) =>
+ (dynCall_iiid = Module["dynCall_iiid"] = wasmExports["dynCall_iiid"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ ));
+
+ var dynCall_iiiiid = (Module["dynCall_iiiiid"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_iiiiid = Module["dynCall_iiiiid"] =
+ wasmExports["dynCall_iiiiid"])(a0, a1, a2, a3, a4, a5));
+
+ var dynCall_iiiiiiiii = (Module["dynCall_iiiiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ ) =>
+ (dynCall_iiiiiiiii = Module["dynCall_iiiiiiiii"] =
+ wasmExports["dynCall_iiiiiiiii"])(a0, a1, a2, a3, a4, a5, a6, a7, a8));
+
+ var dynCall_vd = (Module["dynCall_vd"] = (a0, a1) =>
+ (dynCall_vd = Module["dynCall_vd"] = wasmExports["dynCall_vd"])(a0, a1));
+
+ var dynCall_iiiiiiiiiiii = (Module["dynCall_iiiiiiiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ ) =>
+ (dynCall_iiiiiiiiiiii = Module["dynCall_iiiiiiiiiiii"] =
+ wasmExports["dynCall_iiiiiiiiiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ ));
+
+ var dynCall_iid = (Module["dynCall_iid"] = (a0, a1, a2) =>
+ (dynCall_iid = Module["dynCall_iid"] = wasmExports["dynCall_iid"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_viidi = (Module["dynCall_viidi"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_viidi = Module["dynCall_viidi"] = wasmExports["dynCall_viidi"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_iidiii = (Module["dynCall_iidiii"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_iidiii = Module["dynCall_iidiii"] =
+ wasmExports["dynCall_iidiii"])(a0, a1, a2, a3, a4, a5));
+
+ var dynCall_iiiiiiiiii = (Module["dynCall_iiiiiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ ) =>
+ (dynCall_iiiiiiiiii = Module["dynCall_iiiiiiiiii"] =
+ wasmExports["dynCall_iiiiiiiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ ));
+
+ var dynCall_iiidi = (Module["dynCall_iiidi"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_iiidi = Module["dynCall_iiidi"] = wasmExports["dynCall_iiidi"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_fiii = (Module["dynCall_fiii"] = (a0, a1, a2, a3) =>
+ (dynCall_fiii = Module["dynCall_fiii"] = wasmExports["dynCall_fiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ ));
+
+ var dynCall_iiifi = (Module["dynCall_iiifi"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_iiifi = Module["dynCall_iiifi"] = wasmExports["dynCall_iiifi"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_viiiiiii = (Module["dynCall_viiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ ) =>
+ (dynCall_viiiiiii = Module["dynCall_viiiiiii"] =
+ wasmExports["dynCall_viiiiiii"])(a0, a1, a2, a3, a4, a5, a6, a7));
+
+ var dynCall_viif = (Module["dynCall_viif"] = (a0, a1, a2, a3) =>
+ (dynCall_viif = Module["dynCall_viif"] = wasmExports["dynCall_viif"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ ));
+
+ var dynCall_iiffi = (Module["dynCall_iiffi"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_iiffi = Module["dynCall_iiffi"] = wasmExports["dynCall_iiffi"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_fif = (Module["dynCall_fif"] = (a0, a1, a2) =>
+ (dynCall_fif = Module["dynCall_fif"] = wasmExports["dynCall_fif"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_iif = (Module["dynCall_iif"] = (a0, a1, a2) =>
+ (dynCall_iif = Module["dynCall_iif"] = wasmExports["dynCall_iif"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_dif = (Module["dynCall_dif"] = (a0, a1, a2) =>
+ (dynCall_dif = Module["dynCall_dif"] = wasmExports["dynCall_dif"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_viiif = (Module["dynCall_viiif"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_viiif = Module["dynCall_viiif"] = wasmExports["dynCall_viiif"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_viiiiiiii = (Module["dynCall_viiiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ ) =>
+ (dynCall_viiiiiiii = Module["dynCall_viiiiiiii"] =
+ wasmExports["dynCall_viiiiiiii"])(a0, a1, a2, a3, a4, a5, a6, a7, a8));
+
+ var dynCall_viiidiiiii = (Module["dynCall_viiidiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ ) =>
+ (dynCall_viiidiiiii = Module["dynCall_viiidiiiii"] =
+ wasmExports["dynCall_viiidiiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ ));
+
+ var dynCall_viiiidi = (Module["dynCall_viiiidi"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_viiiidi = Module["dynCall_viiiidi"] =
+ wasmExports["dynCall_viiiidi"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_viiidii = (Module["dynCall_viiidii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_viiidii = Module["dynCall_viiidii"] =
+ wasmExports["dynCall_viiidii"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_viiiid = (Module["dynCall_viiiid"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_viiiid = Module["dynCall_viiiid"] =
+ wasmExports["dynCall_viiiid"])(a0, a1, a2, a3, a4, a5));
+
+ var dynCall_iiiidiii = (Module["dynCall_iiiidiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ ) =>
+ (dynCall_iiiidiii = Module["dynCall_iiiidiii"] =
+ wasmExports["dynCall_iiiidiii"])(a0, a1, a2, a3, a4, a5, a6, a7));
+
+ var dynCall_iiiiidiiii = (Module["dynCall_iiiiidiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ ) =>
+ (dynCall_iiiiidiiii = Module["dynCall_iiiiidiiii"] =
+ wasmExports["dynCall_iiiiidiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ ));
+
+ var dynCall_viiiiid = (Module["dynCall_viiiiid"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_viiiiid = Module["dynCall_viiiiid"] =
+ wasmExports["dynCall_viiiiid"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_viiiiidi = (Module["dynCall_viiiiidi"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ ) =>
+ (dynCall_viiiiidi = Module["dynCall_viiiiidi"] =
+ wasmExports["dynCall_viiiiidi"])(a0, a1, a2, a3, a4, a5, a6, a7));
+
+ var dynCall_viiffffi = (Module["dynCall_viiffffi"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ ) =>
+ (dynCall_viiffffi = Module["dynCall_viiffffi"] =
+ wasmExports["dynCall_viiffffi"])(a0, a1, a2, a3, a4, a5, a6, a7));
+
+ var dynCall_vifffi = (Module["dynCall_vifffi"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_vifffi = Module["dynCall_vifffi"] =
+ wasmExports["dynCall_vifffi"])(a0, a1, a2, a3, a4, a5));
+
+ var dynCall_vfffi = (Module["dynCall_vfffi"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_vfffi = Module["dynCall_vfffi"] = wasmExports["dynCall_vfffi"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_viffffi = (Module["dynCall_viffffi"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_viffffi = Module["dynCall_viffffi"] =
+ wasmExports["dynCall_viffffi"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_fid = (Module["dynCall_fid"] = (a0, a1, a2) =>
+ (dynCall_fid = Module["dynCall_fid"] = wasmExports["dynCall_fid"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_iiiiiid = (Module["dynCall_iiiiiid"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_iiiiiid = Module["dynCall_iiiiiid"] =
+ wasmExports["dynCall_iiiiiid"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_viiidi = (Module["dynCall_viiidi"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_viiidi = Module["dynCall_viiidi"] =
+ wasmExports["dynCall_viiidi"])(a0, a1, a2, a3, a4, a5));
+
+ var dynCall_viddi = (Module["dynCall_viddi"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_viddi = Module["dynCall_viddi"] = wasmExports["dynCall_viddi"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_viiiiiiid = (Module["dynCall_viiiiiiid"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ ) =>
+ (dynCall_viiiiiiid = Module["dynCall_viiiiiiid"] =
+ wasmExports["dynCall_viiiiiiid"])(a0, a1, a2, a3, a4, a5, a6, a7, a8));
+
+ var dynCall_viiffi = (Module["dynCall_viiffi"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_viiffi = Module["dynCall_viiffi"] =
+ wasmExports["dynCall_viiffi"])(a0, a1, a2, a3, a4, a5));
+
+ var dynCall_jiij = (Module["dynCall_jiij"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_jiij = Module["dynCall_jiij"] = wasmExports["dynCall_jiij"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_ji = (Module["dynCall_ji"] = (a0, a1) =>
+ (dynCall_ji = Module["dynCall_ji"] = wasmExports["dynCall_ji"])(a0, a1));
+
+ var dynCall_iiiifffffiiff = (Module["dynCall_iiiifffffiiff"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ ) =>
+ (dynCall_iiiifffffiiff = Module["dynCall_iiiifffffiiff"] =
+ wasmExports["dynCall_iiiifffffiiff"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ ));
+
+ var dynCall_vif = (Module["dynCall_vif"] = (a0, a1, a2) =>
+ (dynCall_vif = Module["dynCall_vif"] = wasmExports["dynCall_vif"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_jif = (Module["dynCall_jif"] = (a0, a1, a2) =>
+ (dynCall_jif = Module["dynCall_jif"] = wasmExports["dynCall_jif"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_vifii = (Module["dynCall_vifii"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_vifii = Module["dynCall_vifii"] = wasmExports["dynCall_vifii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_vfiiii = (Module["dynCall_vfiiii"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_vfiiii = Module["dynCall_vfiiii"] =
+ wasmExports["dynCall_vfiiii"])(a0, a1, a2, a3, a4, a5));
+
+ var dynCall_vidii = (Module["dynCall_vidii"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_vidii = Module["dynCall_vidii"] = wasmExports["dynCall_vidii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_jiiijji = (Module["dynCall_jiiijji"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ ) =>
+ (dynCall_jiiijji = Module["dynCall_jiiijji"] =
+ wasmExports["dynCall_jiiijji"])(a0, a1, a2, a3, a4, a5, a6, a7, a8));
+
+ var dynCall_iiiiiiiiiiiiiiiiiiii = (Module["dynCall_iiiiiiiiiiiiiiiiiiii"] =
+ (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ a14,
+ a15,
+ a16,
+ a17,
+ a18,
+ a19,
+ ) =>
+ (dynCall_iiiiiiiiiiiiiiiiiiii = Module["dynCall_iiiiiiiiiiiiiiiiiiii"] =
+ wasmExports["dynCall_iiiiiiiiiiiiiiiiiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ a14,
+ a15,
+ a16,
+ a17,
+ a18,
+ a19,
+ ));
+
+ var dynCall_viiiiiiiiiiii = (Module["dynCall_viiiiiiiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ ) =>
+ (dynCall_viiiiiiiiiiii = Module["dynCall_viiiiiiiiiiii"] =
+ wasmExports["dynCall_viiiiiiiiiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ ));
+
+ var dynCall_viiiiiiiii = (Module["dynCall_viiiiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ ) =>
+ (dynCall_viiiiiiiii = Module["dynCall_viiiiiiiii"] =
+ wasmExports["dynCall_viiiiiiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ ));
+
+ var dynCall_iiiiiiiiiiiii = (Module["dynCall_iiiiiiiiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ ) =>
+ (dynCall_iiiiiiiiiiiii = Module["dynCall_iiiiiiiiiiiii"] =
+ wasmExports["dynCall_iiiiiiiiiiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ ));
+
+ var dynCall_ff = (Module["dynCall_ff"] = (a0, a1) =>
+ (dynCall_ff = Module["dynCall_ff"] = wasmExports["dynCall_ff"])(a0, a1));
+
+ var dynCall_fff = (Module["dynCall_fff"] = (a0, a1, a2) =>
+ (dynCall_fff = Module["dynCall_fff"] = wasmExports["dynCall_fff"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_iiijiii = (Module["dynCall_iiijiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ ) =>
+ (dynCall_iiijiii = Module["dynCall_iiijiii"] =
+ wasmExports["dynCall_iiijiii"])(a0, a1, a2, a3, a4, a5, a6, a7));
+
+ var dynCall_jii = (Module["dynCall_jii"] = (a0, a1, a2) =>
+ (dynCall_jii = Module["dynCall_jii"] = wasmExports["dynCall_jii"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_iiij = (Module["dynCall_iiij"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_iiij = Module["dynCall_iiij"] = wasmExports["dynCall_iiij"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_iiiji = (Module["dynCall_iiiji"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_iiiji = Module["dynCall_iiiji"] = wasmExports["dynCall_iiiji"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ ));
+
+ var dynCall_viiiiiiiiii = (Module["dynCall_viiiiiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ ) =>
+ (dynCall_viiiiiiiiii = Module["dynCall_viiiiiiiiii"] =
+ wasmExports["dynCall_viiiiiiiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ ));
+
+ var dynCall_iiiiiiiiiiiiii = (Module["dynCall_iiiiiiiiiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ ) =>
+ (dynCall_iiiiiiiiiiiiii = Module["dynCall_iiiiiiiiiiiiii"] =
+ wasmExports["dynCall_iiiiiiiiiiiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ ));
+
+ var dynCall_iidiiiii = (Module["dynCall_iidiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ ) =>
+ (dynCall_iidiiiii = Module["dynCall_iidiiiii"] =
+ wasmExports["dynCall_iidiiiii"])(a0, a1, a2, a3, a4, a5, a6, a7));
+
+ var dynCall_iddiii = (Module["dynCall_iddiii"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_iddiii = Module["dynCall_iddiii"] =
+ wasmExports["dynCall_iddiii"])(a0, a1, a2, a3, a4, a5));
+
+ var dynCall_iddii = (Module["dynCall_iddii"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_iddii = Module["dynCall_iddii"] = wasmExports["dynCall_iddii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_idddiii = (Module["dynCall_idddiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_idddiii = Module["dynCall_idddiii"] =
+ wasmExports["dynCall_idddiii"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_idddii = (Module["dynCall_idddii"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_idddii = Module["dynCall_idddii"] =
+ wasmExports["dynCall_idddii"])(a0, a1, a2, a3, a4, a5));
+
+ var dynCall_idii = (Module["dynCall_idii"] = (a0, a1, a2, a3) =>
+ (dynCall_idii = Module["dynCall_idii"] = wasmExports["dynCall_idii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ ));
+
+ var dynCall_fiiid = (Module["dynCall_fiiid"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_fiiid = Module["dynCall_fiiid"] = wasmExports["dynCall_fiiid"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_iidi = (Module["dynCall_iidi"] = (a0, a1, a2, a3) =>
+ (dynCall_iidi = Module["dynCall_iidi"] = wasmExports["dynCall_iidi"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ ));
+
+ var dynCall_iiiidii = (Module["dynCall_iiiidii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_iiiidii = Module["dynCall_iiiidii"] =
+ wasmExports["dynCall_iiiidii"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_iiiidiiiiii = (Module["dynCall_iiiidiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ ) =>
+ (dynCall_iiiidiiiiii = Module["dynCall_iiiidiiiiii"] =
+ wasmExports["dynCall_iiiidiiiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ ));
+
+ var dynCall_iiidiiiii = (Module["dynCall_iiidiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ ) =>
+ (dynCall_iiidiiiii = Module["dynCall_iiidiiiii"] =
+ wasmExports["dynCall_iiidiiiii"])(a0, a1, a2, a3, a4, a5, a6, a7, a8));
+
+ var dynCall_iiidiiidiiiidiif = (Module["dynCall_iiidiiidiiiidiif"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ a14,
+ a15,
+ ) =>
+ (dynCall_iiidiiidiiiidiif = Module["dynCall_iiidiiidiiiidiif"] =
+ wasmExports["dynCall_iiidiiidiiiidiif"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ a14,
+ a15,
+ ));
+
+ var dynCall_dddd = (Module["dynCall_dddd"] = (a0, a1, a2, a3) =>
+ (dynCall_dddd = Module["dynCall_dddd"] = wasmExports["dynCall_dddd"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ ));
+
+ var dynCall_iidiiii = (Module["dynCall_iidiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_iidiiii = Module["dynCall_iidiiii"] =
+ wasmExports["dynCall_iidiiii"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_iiiddii = (Module["dynCall_iiiddii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_iiiddii = Module["dynCall_iiiddii"] =
+ wasmExports["dynCall_iiiddii"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_iiiiiiiidi = (Module["dynCall_iiiiiiiidi"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ ) =>
+ (dynCall_iiiiiiiidi = Module["dynCall_iiiiiiiidi"] =
+ wasmExports["dynCall_iiiiiiiidi"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ ));
+
+ var dynCall_iiiddi = (Module["dynCall_iiiddi"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_iiiddi = Module["dynCall_iiiddi"] =
+ wasmExports["dynCall_iiiddi"])(a0, a1, a2, a3, a4, a5));
+
+ var dynCall_iiiif = (Module["dynCall_iiiif"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_iiiif = Module["dynCall_iiiif"] = wasmExports["dynCall_iiiif"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_did = (Module["dynCall_did"] = (a0, a1, a2) =>
+ (dynCall_did = Module["dynCall_did"] = wasmExports["dynCall_did"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_iiidii = (Module["dynCall_iiidii"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_iiidii = Module["dynCall_iiidii"] =
+ wasmExports["dynCall_iiidii"])(a0, a1, a2, a3, a4, a5));
+
+ var dynCall_iiidddii = (Module["dynCall_iiidddii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ ) =>
+ (dynCall_iiidddii = Module["dynCall_iiidddii"] =
+ wasmExports["dynCall_iiidddii"])(a0, a1, a2, a3, a4, a5, a6, a7));
+
+ var dynCall_iidii = (Module["dynCall_iidii"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_iidii = Module["dynCall_iidii"] = wasmExports["dynCall_iidii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_didii = (Module["dynCall_didii"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_didii = Module["dynCall_didii"] = wasmExports["dynCall_didii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_iiiiiiidddi = (Module["dynCall_iiiiiiidddi"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ ) =>
+ (dynCall_iiiiiiidddi = Module["dynCall_iiiiiiidddi"] =
+ wasmExports["dynCall_iiiiiiidddi"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ ));
+
+ var dynCall_iidd = (Module["dynCall_iidd"] = (a0, a1, a2, a3) =>
+ (dynCall_iidd = Module["dynCall_iidd"] = wasmExports["dynCall_iidd"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ ));
+
+ var dynCall_viji = (Module["dynCall_viji"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_viji = Module["dynCall_viji"] = wasmExports["dynCall_viji"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_viij = (Module["dynCall_viij"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_viij = Module["dynCall_viij"] = wasmExports["dynCall_viij"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_vijii = (Module["dynCall_vijii"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_vijii = Module["dynCall_vijii"] = wasmExports["dynCall_vijii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ ));
+
+ var dynCall_jiiij = (Module["dynCall_jiiij"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_jiiij = Module["dynCall_jiiij"] = wasmExports["dynCall_jiiij"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ ));
+
+ var dynCall_iiijj = (Module["dynCall_iiijj"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_iiijj = Module["dynCall_iiijj"] = wasmExports["dynCall_iiijj"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ));
+
+ var dynCall_vijji = (Module["dynCall_vijji"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_vijji = Module["dynCall_vijji"] = wasmExports["dynCall_vijji"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ));
+
+ var dynCall_viiji = (Module["dynCall_viiji"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_viiji = Module["dynCall_viiji"] = wasmExports["dynCall_viiji"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ ));
+
+ var dynCall_jiii = (Module["dynCall_jiii"] = (a0, a1, a2, a3) =>
+ (dynCall_jiii = Module["dynCall_jiii"] = wasmExports["dynCall_jiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ ));
+
+ var dynCall_iiiiddd = (Module["dynCall_iiiiddd"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_iiiiddd = Module["dynCall_iiiiddd"] =
+ wasmExports["dynCall_iiiiddd"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_idiiiiii = (Module["dynCall_idiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ ) =>
+ (dynCall_idiiiiii = Module["dynCall_idiiiiii"] =
+ wasmExports["dynCall_idiiiiii"])(a0, a1, a2, a3, a4, a5, a6, a7));
+
+ var dynCall_idiiiiiii = (Module["dynCall_idiiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ ) =>
+ (dynCall_idiiiiiii = Module["dynCall_idiiiiiii"] =
+ wasmExports["dynCall_idiiiiiii"])(a0, a1, a2, a3, a4, a5, a6, a7, a8));
+
+ var dynCall_vidiiii = (Module["dynCall_vidiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_vidiiii = Module["dynCall_vidiiii"] =
+ wasmExports["dynCall_vidiiii"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_vidiii = (Module["dynCall_vidiii"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_vidiii = Module["dynCall_vidiii"] =
+ wasmExports["dynCall_vidiii"])(a0, a1, a2, a3, a4, a5));
+
+ var dynCall_dj = (Module["dynCall_dj"] = (a0, a1, a2) =>
+ (dynCall_dj = Module["dynCall_dj"] = wasmExports["dynCall_dj"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_vdiii = (Module["dynCall_vdiii"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_vdiii = Module["dynCall_vdiii"] = wasmExports["dynCall_vdiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_iiiidiiii = (Module["dynCall_iiiidiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ ) =>
+ (dynCall_iiiidiiii = Module["dynCall_iiiidiiii"] =
+ wasmExports["dynCall_iiiidiiii"])(a0, a1, a2, a3, a4, a5, a6, a7, a8));
+
+ var dynCall_viiiidiiiiii = (Module["dynCall_viiiidiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ ) =>
+ (dynCall_viiiidiiiiii = Module["dynCall_viiiidiiiiii"] =
+ wasmExports["dynCall_viiiidiiiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ ));
+
+ var dynCall_vjiiii = (Module["dynCall_vjiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_vjiiii = Module["dynCall_vjiiii"] =
+ wasmExports["dynCall_vjiiii"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_ij = (Module["dynCall_ij"] = (a0, a1, a2) =>
+ (dynCall_ij = Module["dynCall_ij"] = wasmExports["dynCall_ij"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_vij = (Module["dynCall_vij"] = (a0, a1, a2, a3) =>
+ (dynCall_vij = Module["dynCall_vij"] = wasmExports["dynCall_vij"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ ));
+
+ var dynCall_vijiiiii = (Module["dynCall_vijiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ ) =>
+ (dynCall_vijiiiii = Module["dynCall_vijiiiii"] =
+ wasmExports["dynCall_vijiiiii"])(a0, a1, a2, a3, a4, a5, a6, a7, a8));
+
+ var dynCall_viiiiiiiiiii = (Module["dynCall_viiiiiiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ ) =>
+ (dynCall_viiiiiiiiiii = Module["dynCall_viiiiiiiiiii"] =
+ wasmExports["dynCall_viiiiiiiiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ ));
+
+ var dynCall_iij = (Module["dynCall_iij"] = (a0, a1, a2, a3) =>
+ (dynCall_iij = Module["dynCall_iij"] = wasmExports["dynCall_iij"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ ));
+
+ var dynCall_fi = (Module["dynCall_fi"] = (a0, a1) =>
+ (dynCall_fi = Module["dynCall_fi"] = wasmExports["dynCall_fi"])(a0, a1));
+
+ var dynCall_fii = (Module["dynCall_fii"] = (a0, a1, a2) =>
+ (dynCall_fii = Module["dynCall_fii"] = wasmExports["dynCall_fii"])(
+ a0,
+ a1,
+ a2,
+ ));
+
+ var dynCall_viiiiiiiiiiiii = (Module["dynCall_viiiiiiiiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ ) =>
+ (dynCall_viiiiiiiiiiiii = Module["dynCall_viiiiiiiiiiiii"] =
+ wasmExports["dynCall_viiiiiiiiiiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ ));
+
+ var dynCall_iiiiiiiiiii = (Module["dynCall_iiiiiiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ ) =>
+ (dynCall_iiiiiiiiiii = Module["dynCall_iiiiiiiiiii"] =
+ wasmExports["dynCall_iiiiiiiiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ ));
+
+ var dynCall_viidii = (Module["dynCall_viidii"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_viidii = Module["dynCall_viidii"] =
+ wasmExports["dynCall_viidii"])(a0, a1, a2, a3, a4, a5));
+
+ var dynCall_vjiii = (Module["dynCall_vjiii"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_vjiii = Module["dynCall_vjiii"] = wasmExports["dynCall_vjiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ ));
+
+ var dynCall_viiiijii = (Module["dynCall_viiiijii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ ) =>
+ (dynCall_viiiijii = Module["dynCall_viiiijii"] =
+ wasmExports["dynCall_viiiijii"])(a0, a1, a2, a3, a4, a5, a6, a7, a8));
+
+ var dynCall_iiiijjii = (Module["dynCall_iiiijjii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ ) =>
+ (dynCall_iiiijjii = Module["dynCall_iiiijjii"] =
+ wasmExports["dynCall_iiiijjii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ ));
+
+ var dynCall_jjiii = (Module["dynCall_jjiii"] = (a0, a1, a2, a3, a4, a5) =>
+ (dynCall_jjiii = Module["dynCall_jjiii"] = wasmExports["dynCall_jjiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ ));
+
+ var dynCall_j = (Module["dynCall_j"] = (a0) =>
+ (dynCall_j = Module["dynCall_j"] = wasmExports["dynCall_j"])(a0));
+
+ var dynCall_ddiiiii = (Module["dynCall_ddiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_ddiiiii = Module["dynCall_ddiiiii"] =
+ wasmExports["dynCall_ddiiiii"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_jiji = (Module["dynCall_jiji"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_jiji = Module["dynCall_jiji"] = wasmExports["dynCall_jiji"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_iiiiij = (Module["dynCall_iiiiij"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_iiiiij = Module["dynCall_iiiiij"] =
+ wasmExports["dynCall_iiiiij"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_viijii = (Module["dynCall_viijii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ ) =>
+ (dynCall_viijii = Module["dynCall_viijii"] =
+ wasmExports["dynCall_viijii"])(a0, a1, a2, a3, a4, a5, a6));
+
+ var dynCall_jiiii = (Module["dynCall_jiiii"] = (a0, a1, a2, a3, a4) =>
+ (dynCall_jiiii = Module["dynCall_jiiii"] = wasmExports["dynCall_jiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ ));
+
+ var dynCall_viiiiiiiiiiiiiii = (Module["dynCall_viiiiiiiiiiiiiii"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ a14,
+ a15,
+ ) =>
+ (dynCall_viiiiiiiiiiiiiii = Module["dynCall_viiiiiiiiiiiiiii"] =
+ wasmExports["dynCall_viiiiiiiiiiiiiii"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ a14,
+ a15,
+ ));
+
+ var dynCall_iiiiijj = (Module["dynCall_iiiiijj"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ ) =>
+ (dynCall_iiiiijj = Module["dynCall_iiiiijj"] =
+ wasmExports["dynCall_iiiiijj"])(a0, a1, a2, a3, a4, a5, a6, a7, a8));
+
+ var dynCall_iiiiiijj = (Module["dynCall_iiiiiijj"] = (
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ ) =>
+ (dynCall_iiiiiijj = Module["dynCall_iiiiiijj"] =
+ wasmExports["dynCall_iiiiiijj"])(
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ ));
+
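+  // Asyncify exports used to unwind and rewind the wasm stack around
+  // asynchronous operations.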
+ var _asyncify_start_unwind = (a0) =>
+ (_asyncify_start_unwind = wasmExports["asyncify_start_unwind"])(a0);
+
+ var _asyncify_stop_unwind = () =>
+ (_asyncify_stop_unwind = wasmExports["asyncify_stop_unwind"])();
+
+ var _asyncify_start_rewind = (a0) =>
+ (_asyncify_start_rewind = wasmExports["asyncify_start_rewind"])(a0);
+
+ var _asyncify_stop_rewind = () =>
+ (_asyncify_stop_rewind = wasmExports["asyncify_stop_rewind"])();
+
+ var ___start_em_js = (Module["___start_em_js"] = 3906828);
+
+ var ___stop_em_js = (Module["___stop_em_js"] = 3908152);
+
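+  // invoke_* wrappers route indirect calls through dynCall_* while saving and
+  // restoring the stack pointer. Non-numeric exceptions are genuine JS errors
+  // and are re-thrown; numeric values originate from C++ exceptions / longjmp
+  // inside wasm, so the wrapper records them via _setThrew instead.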
+ function invoke_iii(index, a1, a2) {
+ var sp = stackSave();
+ try {
+ return dynCall_iii(index, a1, a2);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiii(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ dynCall_viiii(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vi(index, a1) {
+ var sp = stackSave();
+ try {
+ dynCall_vi(index, a1);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiii(index, a1, a2, a3) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiii(index, a1, a2, a3);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_ii(index, a1) {
+ var sp = stackSave();
+ try {
+ return dynCall_ii(index, a1);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiii(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiii(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vii(index, a1, a2) {
+ var sp = stackSave();
+ try {
+ dynCall_vii(index, a1, a2);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_i(index) {
+ var sp = stackSave();
+ try {
+ return dynCall_i(index);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiiii(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ dynCall_viiiii(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viii(index, a1, a2, a3) {
+ var sp = stackSave();
+ try {
+ dynCall_viii(index, a1, a2, a3);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_v(index) {
+ var sp = stackSave();
+ try {
+ dynCall_v(index);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiii(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiii(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiiiii(index, a1, a2, a3, a4, a5, a6, a7) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiiiii(index, a1, a2, a3, a4, a5, a6, a7);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiiiii(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ dynCall_viiiiii(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_diiiii(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ return dynCall_diiiii(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiiii(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiiii(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiiiiii(index, a1, a2, a3, a4, a5, a6, a7) {
+ var sp = stackSave();
+ try {
+ dynCall_viiiiiii(index, a1, a2, a3, a4, a5, a6, a7);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiidi(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiidi(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiidi(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ dynCall_viiidi(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_d(index) {
+ var sp = stackSave();
+ try {
+ return dynCall_d(index);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viid(index, a1, a2, a3) {
+ var sp = stackSave();
+ try {
+ dynCall_viid(index, a1, a2, a3);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viidi(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ dynCall_viidi(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiiiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ ) {
+ var sp = stackSave();
+ try {
+ dynCall_viiiiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ );
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiid(index, a1, a2, a3) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiid(index, a1, a2, a3);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vid(index, a1, a2) {
+ var sp = stackSave();
+ try {
+ dynCall_vid(index, a1, a2);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_di(index, a1) {
+ var sp = stackSave();
+ try {
+ return dynCall_di(index, a1);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_dii(index, a1, a2) {
+ var sp = stackSave();
+ try {
+ return dynCall_dii(index, a1, a2);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiid(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiid(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ ) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ );
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vd(index, a1) {
+ var sp = stackSave();
+ try {
+ dynCall_vd(index, a1);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiid(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ dynCall_viiid(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iid(index, a1, a2) {
+ var sp = stackSave();
+ try {
+ return dynCall_iid(index, a1, a2);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iif(index, a1, a2) {
+ var sp = stackSave();
+ try {
+ return dynCall_iif(index, a1, a2);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iidiii(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ return dynCall_iidiii(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiiiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8, a9) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiiiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_fiii(index, a1, a2, a3) {
+ var sp = stackSave();
+ try {
+ return dynCall_fiii(index, a1, a2, a3);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiifi(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiifi(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiid(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiid(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viif(index, a1, a2, a3) {
+ var sp = stackSave();
+ try {
+ dynCall_viif(index, a1, a2, a3);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiffi(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiffi(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_diii(index, a1, a2, a3) {
+ var sp = stackSave();
+ try {
+ return dynCall_diii(index, a1, a2, a3);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiif(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ dynCall_viiif(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiiiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8) {
+ var sp = stackSave();
+ try {
+ dynCall_viiiiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiiid(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ dynCall_viiiid(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiiidi(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ dynCall_viiiidi(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiidiii(index, a1, a2, a3, a4, a5, a6, a7) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiidiii(index, a1, a2, a3, a4, a5, a6, a7);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiiiidi(index, a1, a2, a3, a4, a5, a6, a7) {
+ var sp = stackSave();
+ try {
+ dynCall_viiiiidi(index, a1, a2, a3, a4, a5, a6, a7);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiidiiii(index, a1, a2, a3, a4, a5, a6, a7, a8, a9) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiidiiii(index, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiidii(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ dynCall_viiidii(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiiiid(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ dynCall_viiiiid(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiidiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8, a9) {
+ var sp = stackSave();
+ try {
+ dynCall_viiidiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiffffi(index, a1, a2, a3, a4, a5, a6, a7) {
+ var sp = stackSave();
+ try {
+ dynCall_viiffffi(index, a1, a2, a3, a4, a5, a6, a7);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vifffi(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ dynCall_vifffi(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vfffi(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ dynCall_vfffi(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viffffi(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ dynCall_viffffi(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_fid(index, a1, a2) {
+ var sp = stackSave();
+ try {
+ return dynCall_fid(index, a1, a2);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiiid(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiiid(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_dif(index, a1, a2) {
+ var sp = stackSave();
+ try {
+ return dynCall_dif(index, a1, a2);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viddi(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ dynCall_viddi(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiiiiiid(index, a1, a2, a3, a4, a5, a6, a7, a8) {
+ var sp = stackSave();
+ try {
+ dynCall_viiiiiiid(index, a1, a2, a3, a4, a5, a6, a7, a8);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiffi(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ dynCall_viiffi(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiifffffiiff(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ ) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiifffffiiff(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ );
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vif(index, a1, a2) {
+ var sp = stackSave();
+ try {
+ dynCall_vif(index, a1, a2);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_fif(index, a1, a2) {
+ var sp = stackSave();
+ try {
+ return dynCall_fif(index, a1, a2);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vifii(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ dynCall_vifii(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vfiiii(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ dynCall_vfiiii(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vidii(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ dynCall_vidii(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiiiiiiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ a14,
+ a15,
+ ) {
+ var sp = stackSave();
+ try {
+ dynCall_viiiiiiiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ a14,
+ a15,
+ );
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ ) {
+ var sp = stackSave();
+ try {
+ dynCall_viiiiiiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiiiiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8, a9) {
+ var sp = stackSave();
+ try {
+ dynCall_viiiiiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_fi(index, a1) {
+ var sp = stackSave();
+ try {
+ return dynCall_fi(index, a1);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiiiiiiiiiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ a14,
+ a15,
+ a16,
+ a17,
+ a18,
+ a19,
+ ) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiiiiiiiiiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ a14,
+ a15,
+ a16,
+ a17,
+ a18,
+ a19,
+ );
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ ) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ );
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiiiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ ) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiiiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ );
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iidiiiii(index, a1, a2, a3, a4, a5, a6, a7) {
+ var sp = stackSave();
+ try {
+ return dynCall_iidiiiii(index, a1, a2, a3, a4, a5, a6, a7);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiddii(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiddii(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iddiii(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ return dynCall_iddiii(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iddii(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ return dynCall_iddii(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_idddiii(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ return dynCall_idddiii(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_idddii(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ return dynCall_idddii(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_idii(index, a1, a2, a3) {
+ var sp = stackSave();
+ try {
+ return dynCall_idii(index, a1, a2, a3);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_fiiid(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ return dynCall_fiiid(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiidiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ ) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiidiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ );
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iidi(index, a1, a2, a3) {
+ var sp = stackSave();
+ try {
+ return dynCall_iidi(index, a1, a2, a3);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiidii(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiidii(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiidiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiidiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiidiiidiiiidiif(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ a14,
+ a15,
+ ) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiidiiidiiiidiif(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ a14,
+ a15,
+ );
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_dddd(index, a1, a2, a3) {
+ var sp = stackSave();
+ try {
+ return dynCall_dddd(index, a1, a2, a3);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iidiiii(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ return dynCall_iidiiii(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiiiiidi(index, a1, a2, a3, a4, a5, a6, a7, a8, a9) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiiiiidi(index, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiddi(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiddi(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_did(index, a1, a2) {
+ var sp = stackSave();
+ try {
+ return dynCall_did(index, a1, a2);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiidii(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiidii(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiddd(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiddd(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiidddii(index, a1, a2, a3, a4, a5, a6, a7) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiidddii(index, a1, a2, a3, a4, a5, a6, a7);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iidii(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ return dynCall_iidii(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_didii(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ return dynCall_didii(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiiiidddi(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ ) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiiiidddi(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ );
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iidd(index, a1, a2, a3) {
+ var sp = stackSave();
+ try {
+ return dynCall_iidd(index, a1, a2, a3);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vidiiii(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ dynCall_vidiiii(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vidiii(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ dynCall_vidiii(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vdiii(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ dynCall_vdiii(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiidiiii(index, a1, a2, a3, a4, a5, a6, a7, a8) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiidiiii(index, a1, a2, a3, a4, a5, a6, a7, a8);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_idiiiiii(index, a1, a2, a3, a4, a5, a6, a7) {
+ var sp = stackSave();
+ try {
+ return dynCall_idiiiiii(index, a1, a2, a3, a4, a5, a6, a7);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_idiiiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8) {
+ var sp = stackSave();
+ try {
+ return dynCall_idiiiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiiidiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ ) {
+ var sp = stackSave();
+ try {
+ dynCall_viiiidiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ );
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_fii(index, a1, a2) {
+ var sp = stackSave();
+ try {
+ return dynCall_fii(index, a1, a2);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiiiiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ ) {
+ var sp = stackSave();
+ try {
+ dynCall_viiiiiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ a11,
+ a12,
+ a13,
+ );
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ ) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiiiiiiii(
+ index,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ a8,
+ a9,
+ a10,
+ );
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viidii(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ dynCall_viidii(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_ddiiiii(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ return dynCall_ddiiiii(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viji(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ dynCall_viji(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_jiii(index, a1, a2, a3) {
+ var sp = stackSave();
+ try {
+ return dynCall_jiii(index, a1, a2, a3);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_jiij(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ return dynCall_jiij(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vij(index, a1, a2, a3) {
+ var sp = stackSave();
+ try {
+ dynCall_vij(index, a1, a2, a3);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_ji(index, a1) {
+ var sp = stackSave();
+ try {
+ return dynCall_ji(index, a1);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viij(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ dynCall_viij(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vijii(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ dynCall_vijii(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiijj(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiijj(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_jii(index, a1, a2) {
+ var sp = stackSave();
+ try {
+ return dynCall_jii(index, a1, a2);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vijji(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ dynCall_vijji(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_jiiij(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ return dynCall_jiiij(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiji(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ dynCall_viiji(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_dj(index, a1, a2) {
+ var sp = stackSave();
+ try {
+ return dynCall_dj(index, a1, a2);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vjiiii(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ dynCall_vjiiii(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_ij(index, a1, a2) {
+ var sp = stackSave();
+ try {
+ return dynCall_ij(index, a1, a2);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vijiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8) {
+ var sp = stackSave();
+ try {
+ dynCall_vijiiiii(index, a1, a2, a3, a4, a5, a6, a7, a8);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iij(index, a1, a2, a3) {
+ var sp = stackSave();
+ try {
+ return dynCall_iij(index, a1, a2, a3);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_vjiii(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ dynCall_vjiii(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_viiiijii(index, a1, a2, a3, a4, a5, a6, a7, a8) {
+ var sp = stackSave();
+ try {
+ dynCall_viiiijii(index, a1, a2, a3, a4, a5, a6, a7, a8);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiijjii(index, a1, a2, a3, a4, a5, a6, a7, a8, a9) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiijjii(index, a1, a2, a3, a4, a5, a6, a7, a8, a9);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_jjiii(index, a1, a2, a3, a4, a5) {
+ var sp = stackSave();
+ try {
+ return dynCall_jjiii(index, a1, a2, a3, a4, a5);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_j(index) {
+ var sp = stackSave();
+ try {
+ return dynCall_j(index);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_iiiiij(index, a1, a2, a3, a4, a5, a6) {
+ var sp = stackSave();
+ try {
+ return dynCall_iiiiij(index, a1, a2, a3, a4, a5, a6);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
+ function invoke_jiiii(index, a1, a2, a3, a4) {
+ var sp = stackSave();
+ try {
+ return dynCall_jiiii(index, a1, a2, a3, a4);
+ } catch (e) {
+ stackRestore(sp);
+ if (e !== e + 0) throw e;
+ _setThrew(1, 0);
+ }
+ }
+
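+  // applySignatureConversions wraps pointer-returning exports so their signed
+  // i32 results are reinterpreted as unsigned (>>> 0), keeping addresses above
+  // 2 GiB usable as indices into the JS-side heap views.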
+ function applySignatureConversions(wasmExports) {
+ wasmExports = Object.assign({}, wasmExports);
+ var makeWrapper_p = (f) => () => f() >>> 0;
+ var makeWrapper_pp = (f) => (a0) => f(a0) >>> 0;
+ var makeWrapper_ppp = (f) => (a0, a1) => f(a0, a1) >>> 0;
+ wasmExports["pthread_self"] = makeWrapper_p(wasmExports["pthread_self"]);
+ wasmExports["malloc"] = makeWrapper_pp(wasmExports["malloc"]);
+ wasmExports["__errno_location"] = makeWrapper_p(
+ wasmExports["__errno_location"],
+ );
+ wasmExports["emscripten_builtin_memalign"] = makeWrapper_ppp(
+ wasmExports["emscripten_builtin_memalign"],
+ );
+ wasmExports["__getTypeName"] = makeWrapper_pp(
+ wasmExports["__getTypeName"],
+ );
+ wasmExports["stackSave"] = makeWrapper_p(wasmExports["stackSave"]);
+ wasmExports["stackAlloc"] = makeWrapper_pp(wasmExports["stackAlloc"]);
+ return wasmExports;
+ }
+
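+  // Base64 / data-URI helpers: used when an asset (e.g. the wasm binary in a
+  // single-file build) is embedded as a "data:" URI instead of being fetched.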
+ function intArrayFromBase64(s) {
+ if (typeof ENVIRONMENT_IS_NODE != "undefined" && ENVIRONMENT_IS_NODE) {
+ var buf = Buffer.from(s, "base64");
+ return new Uint8Array(buf.buffer, buf.byteOffset, buf.length);
+ }
+ var decoded = atob(s);
+ var bytes = new Uint8Array(decoded.length);
+ for (var i = 0; i < decoded.length; ++i) {
+ bytes[i] = decoded.charCodeAt(i);
+ }
+ return bytes;
+ }
+
+ function tryParseAsDataURI(filename) {
+ if (!isDataURI(filename)) {
+ return;
+ }
+ return intArrayFromBase64(filename.slice(dataURIPrefix.length));
+ }
+
+ Module["addRunDependency"] = addRunDependency;
+
+ Module["removeRunDependency"] = removeRunDependency;
+
+ Module["FS_createPath"] = FS.createPath;
+
+ Module["FS_createLazyFile"] = FS.createLazyFile;
+
+ Module["FS_createDevice"] = FS.createDevice;
+
+ Module["wasmMemory"] = wasmMemory;
+
+ Module["keepRuntimeAlive"] = keepRuntimeAlive;
+
+ Module["ExitStatus"] = ExitStatus;
+
+ Module["FS_createPreloadedFile"] = FS.createPreloadedFile;
+
+ Module["FS_createDataFile"] = FS.createDataFile;
+
+ Module["FS_unlink"] = FS.unlink;
+
+ Module["PThread"] = PThread;
+
+ Module["FS_readdir"] = FS.readdir;
+
+ Module["FS_analyzePath"] = FS.analyzePath;
+
+ var calledRun;
+
+ dependenciesFulfilled = function runCaller() {
+ if (!calledRun) run();
+ if (!calledRun) dependenciesFulfilled = runCaller;
+ };
+
+ function run() {
+ if (runDependencies > 0) {
+ return;
+ }
+ if (ENVIRONMENT_IS_PTHREAD) {
+ readyPromiseResolve(Module);
+ initRuntime();
+ startWorker(Module);
+ return;
+ }
+ preRun();
+ if (runDependencies > 0) {
+ return;
+ }
+ function doRun() {
+ if (calledRun) return;
+ calledRun = true;
+ Module["calledRun"] = true;
+ if (ABORT) return;
+ initRuntime();
+ readyPromiseResolve(Module);
+ if (Module["onRuntimeInitialized"]) Module["onRuntimeInitialized"]();
+ postRun();
+ }
+ if (Module["setStatus"]) {
+ Module["setStatus"]("Running...");
+ setTimeout(function () {
+ setTimeout(function () {
+ Module["setStatus"]("");
+ }, 1);
+ doRun();
+ }, 1);
+ } else {
+ doRun();
+ }
+ }
+
+ if (Module["preInit"]) {
+ if (typeof Module["preInit"] == "function")
+ Module["preInit"] = [Module["preInit"]];
+ while (Module["preInit"].length > 0) {
+ Module["preInit"].pop()();
+ }
+ }
+
+ run();
+
+ return moduleArg.ready;
+ };
+})();
+if (typeof exports === "object" && typeof module === "object")
+ module.exports = getUsdModule;
+else if (typeof define === "function" && define["amd"])
+ define([], () => getUsdModule);
+
+globalThis["NEEDLE:USD:GET"] = getUsdModule;
diff --git a/usd-wasm/src/bindings-debug/emHdBindings.worker.js b/usd-wasm/src/bindings-debug/emHdBindings.worker.js
new file mode 100644
index 0000000..b43d548
--- /dev/null
+++ b/usd-wasm/src/bindings-debug/emHdBindings.worker.js
@@ -0,0 +1,181 @@
+/**
+ * @license
+ * Copyright 2015 The Emscripten Authors
+ * SPDX-License-Identifier: MIT
+ */
+
+// Pthread Web Worker startup routine:
+// This is the entry point file that is loaded first by each Web Worker
+// that executes pthreads on the Emscripten application.
+
+'use strict';
+
+var Module = {};
+
+// Node.js support
+var ENVIRONMENT_IS_NODE = typeof process == 'object' && typeof process.versions == 'object' && typeof process.versions.node == 'string';
+if (ENVIRONMENT_IS_NODE) {
+ // Create as web-worker-like an environment as we can.
+
+ var nodeWorkerThreads = require('worker_threads');
+
+ var parentPort = nodeWorkerThreads.parentPort;
+
+ parentPort.on('message', (data) => onmessage({ data: data }));
+
+ var fs = require('fs');
+
+ Object.assign(global, {
+ self: global,
+ require,
+ Module,
+ location: {
+ href: __filename
+ },
+ Worker: nodeWorkerThreads.Worker,
+ importScripts: (f) => (0, eval)(fs.readFileSync(f, 'utf8') + '//# sourceURL=' + f),
+ postMessage: (msg) => parentPort.postMessage(msg),
+ performance: global.performance || { now: Date.now },
+ });
+}
+
+// Thread-local guard variable for one-time init of the JS state
+var initializedJS = false;
+
+function threadPrintErr() {
+ var text = Array.prototype.slice.call(arguments).join(' ');
+ // See https://github.com/emscripten-core/emscripten/issues/14804
+ if (ENVIRONMENT_IS_NODE) {
+ fs.writeSync(2, text + '\n');
+ return;
+ }
+ console.error(text);
+}
+function threadAlert() {
+ var text = Array.prototype.slice.call(arguments).join(' ');
+ postMessage({cmd: 'alert', text, threadId: Module['_pthread_self']()});
+}
+var err = threadPrintErr;
+self.alert = threadAlert;
+
+Module['instantiateWasm'] = (info, receiveInstance) => {
+ // Instantiate from the module posted from the main thread.
+ // We can just use sync instantiation in the worker.
+ var module = Module['wasmModule'];
+ // We don't need the module anymore; new threads will be spawned from the main thread.
+ Module['wasmModule'] = null;
+ var instance = new WebAssembly.Instance(module, info);
+ // TODO: Due to Closure regression https://github.com/google/closure-compiler/issues/3193,
+ // the above line no longer optimizes out down to the following line.
+ // When the regression is fixed, we can remove this if/else.
+ return receiveInstance(instance);
+}
+
+// Turn unhandled rejected promises into errors so that the main thread will be
+// notified about them.
+self.onunhandledrejection = (e) => {
+ throw e.reason || e;
+};
+
+function handleMessage(e) {
+ try {
+ if (e.data.cmd === 'load') { // Preload command that is called once per worker to parse and load the Emscripten code.
+
+ // Until we initialize the runtime, queue up any further incoming messages.
+ let messageQueue = [];
+ self.onmessage = (e) => messageQueue.push(e);
+
+ // And add a callback for when the runtime is initialized.
+ self.startWorker = (instance) => {
+ Module = instance;
+ // Notify the main thread that this thread has loaded.
+ postMessage({ 'cmd': 'loaded' });
+ // Process any messages that were queued before the thread was ready.
+ for (let msg of messageQueue) {
+ handleMessage(msg);
+ }
+ // Restore the real message handler.
+ self.onmessage = handleMessage;
+ };
+
+ // Module and memory were sent from main thread
+ Module['wasmModule'] = e.data.wasmModule;
+
+ // Use `const` here to ensure that the variable is scoped only to
+ // that iteration, allowing safe reference from a closure.
+ for (const handler of e.data.handlers) {
+ Module[handler] = (...args) => {
+ postMessage({ cmd: 'callHandler', handler, args: args });
+ }
+ }
+
+ Module['wasmMemory'] = e.data.wasmMemory;
+
+ Module['buffer'] = Module['wasmMemory'].buffer;
+
+ Module['ENVIRONMENT_IS_PTHREAD'] = true;
+
+ if (typeof e.data.urlOrBlob == 'string') {
+ importScripts(e.data.urlOrBlob);
+ } else {
+ var objectUrl = URL.createObjectURL(e.data.urlOrBlob);
+ importScripts(objectUrl);
+ URL.revokeObjectURL(objectUrl);
+ }
+ getUsdModule(Module);
+ } else if (e.data.cmd === 'run') {
+ // Pass the thread address to wasm to store it for fast access.
+ Module['__emscripten_thread_init'](e.data.pthread_ptr, /*is_main=*/0, /*is_runtime=*/0, /*can_block=*/1);
+
+ // Await mailbox notifications with `Atomics.waitAsync` so we can start
+ // using the fast `Atomics.notify` notification path.
+ Module['__emscripten_thread_mailbox_await'](e.data.pthread_ptr);
+
+ // Also call inside JS module to set up the stack frame for this pthread in JS module scope
+ Module['establishStackSpace']();
+ Module['PThread'].receiveObjectTransfer(e.data);
+ Module['PThread'].threadInitTLS();
+
+ if (!initializedJS) {
+ // Embind must initialize itself on all threads, as it generates support JS.
+ // We only do this once per worker since they get reused
+ Module['__embind_initialize_bindings']();
+ initializedJS = true;
+ }
+
+ try {
+ Module['invokeEntryPoint'](e.data.start_routine, e.data.arg);
+ } catch(ex) {
+ if (ex != 'unwind') {
+ // The pthread "crashed". Do not call `_emscripten_thread_exit` (which
+ // would make this thread joinable). Instead, re-throw the exception
+ // and let the top level handler propagate it back to the main thread.
+ throw ex;
+ }
+ }
+ } else if (e.data.cmd === 'cancel') { // Main thread is asking for a pthread_cancel() on this thread.
+ if (Module['_pthread_self']()) {
+ Module['__emscripten_thread_exit'](-1);
+ }
+ } else if (e.data.target === 'setimmediate') {
+ // no-op
+ } else if (e.data.cmd === 'checkMailbox') {
+ if (initializedJS) {
+ Module['checkMailbox']();
+ }
+ } else if (e.data.cmd) {
+      // The received message looks like something that should be handled by this
+      // message handler (since an e.data.cmd field is present), but it is not one
+      // of the recognized commands:
+ err(`worker.js received unknown command ${e.data.cmd}`);
+ err(e.data);
+ }
+ } catch(ex) {
+ if (Module['__emscripten_thread_crashed']) {
+ Module['__emscripten_thread_crashed']();
+ }
+ throw ex;
+ }
+};
+
+self.onmessage = handleMessage;
diff --git a/usd-wasm/src/bindings/emHdBindings.js b/usd-wasm/src/bindings/emHdBindings.js
index 0df0321..e9ac7af 100644
--- a/usd-wasm/src/bindings/emHdBindings.js
+++ b/usd-wasm/src/bindings/emHdBindings.js
@@ -8,7 +8,7 @@ var getUsdModule = ((args) => {
// module overrides can be supplied here
locateFile: (path, prefix) => {
if (!prefix && _scriptDir) prefix = _scriptDir.substr(0, _scriptDir.lastIndexOf('/') + 1);
- return prefix + path;
+ return prefix + path;
},
...args
}) {
@@ -10163,8 +10163,8 @@ var getUsdModule = ((args) => {
(_asyncify_start_rewind = wasmExports["Ug"])(a0);
var _asyncify_stop_rewind = () =>
(_asyncify_stop_rewind = wasmExports["Vg"])();
- var ___start_em_js = (Module["___start_em_js"] = 3895468);
- var ___stop_em_js = (Module["___stop_em_js"] = 3896792);
+ var ___start_em_js = (Module["___start_em_js"] = 3890876);
+ var ___stop_em_js = (Module["___stop_em_js"] = 3892200);
function invoke_iii(index, a1, a2) {
var sp = stackSave();
try {
@@ -12019,4 +12019,4 @@ if (typeof exports === "object" && typeof module === "object")
else if (typeof define === "function" && define["amd"])
define([], () => getUsdModule);
-globalThis["NEEDLE:USD:GET"] = getUsdModule;
\ No newline at end of file
+globalThis["NEEDLE:USD:GET"] = getUsdModule;
diff --git a/usd-wasm/src/bindings/emHdBindings.wasm b/usd-wasm/src/bindings/emHdBindings.wasm
index 03f2b21..38f0c68 100644
Binary files a/usd-wasm/src/bindings/emHdBindings.wasm and b/usd-wasm/src/bindings/emHdBindings.wasm differ