diff --git a/Makefile b/Makefile index eda6e5fb..9de94d45 100644 --- a/Makefile +++ b/Makefile @@ -172,12 +172,19 @@ generate-go: # Generate clients from the following swagger files: # System Layout Service: ./pkg/sls-client/openapi.yaml -generate-swagger: bin/swagger-codegen-cli.jar - java -jar bin/swagger-codegen-cli.jar generate -i ./pkg/sls-client/openapi.yaml -l go -o ./pkg/sls-client/ -DpackageName=sls_client +generate-swagger-sls-client: bin/swagger-codegen-cli.jar + java -jar bin/swagger-codegen-cli.jar generate -i ./pkg/sls-client/openapi.yaml -l go -o ./pkg/sls-client/ -DpackageName=sls_client -t ./pkg/sls-client/templates go fmt ./pkg/sls-client/... goimports -w ./pkg/sls-client -generate: generate-swagger generate-go +# Generate clients from the following swagger files: +# Hardware State Manager: ./pkg/hsm-client/openapi.yaml +generate-swagger-hsm-client: bin/swagger-codegen-cli.jar + java -jar bin/swagger-codegen-cli.jar generate -i ./pkg/hsm-client/openapi.yaml -l go -o ./pkg/hsm-client/ -DpackageName=hsm_client + go fmt ./pkg/hsm-client/... + goimports -w ./pkg/hsm-client + +generate: generate-swagger-sls-client generate-swagger-hsm-client generate-go # Jenkins doesn't have java installed, so the generate target fails to run bin: diff --git a/cmd/cabinet/add_cabinet.go b/cmd/cabinet/add_cabinet.go index 4050e75c..055f4dcc 100644 --- a/cmd/cabinet/add_cabinet.go +++ b/cmd/cabinet/add_cabinet.go @@ -79,8 +79,16 @@ func addCabinet(cmd *cobra.Command, args []string) error { } } + // Push all the CLI flags that were provided into a generic map + // TODO Need to figure out how to specify to unset something + // Right now the build metadata function in the CSM provider will + // unset options if nil is passed in. + cabinetMetadata := map[string]interface{}{ + "vlanID": vlanId, + } + // Add the cabinet to the inventory using domain methods - result, err := d.AddCabinet(cmd.Context(), args[0], cabinetNumber) + result, err := d.AddCabinet(cmd.Context(), args[0], cabinetNumber, cabinetMetadata) if errors.Is(err, provider.ErrDataValidationFailure) { // TODO the following should probably suggest commands to fix the issue? log.Error().Msgf("Inventory data validation errors encountered")
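For reference, a sketch of how the CSM provider's BuildHardwareMetadata might consume this map for cabinets. This is not part of the diff: buildCabinetMetadata is a hypothetical name, and the mapstructure decode pattern is borrowed from the node handling that this change removes from csm.go; CabinetMetadata and IntPtr are the helpers used by import.go below.

// A minimal sketch, assuming it lives in the csm package alongside
// CabinetMetadata and IntPtr.
func (csm *CSM) buildCabinetMetadata(cHardware *inventory.Hardware, rawProperties map[string]interface{}) error {
	if cHardware.ProviderProperties == nil {
		cHardware.ProviderProperties = map[string]interface{}{}
	}

	// Start from any existing metadata so unrelated fields are preserved
	properties := CabinetMetadata{}
	if _, exists := cHardware.ProviderProperties["csm"]; exists {
		if err := mapstructure.Decode(cHardware.ProviderProperties["csm"], &properties); err != nil {
			return err
		}
	}

	// The key must match what add_cabinet.go pushes into the map
	if vlanRaw, exists := rawProperties["vlanID"]; exists {
		if vlanRaw == nil {
			properties.HMNVlan = nil // nil unsets the option, per the TODO above
		} else {
			properties.HMNVlan = IntPtr(vlanRaw.(int))
		}
	}

	cHardware.ProviderProperties["csm"] = properties
	return nil
}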
diff --git a/cmd/init.go b/cmd/init.go index e5c93e65..21986fe7 100644 --- a/cmd/init.go +++ b/cmd/init.go @@ -42,6 +42,7 @@ func setupLogging() { // Default level for this example is info, unless debug flag is present zerolog.SetGlobalLevel(zerolog.InfoLevel) // Fancy, human-friendly console logger (but slower) + // TODO Set no-color based off a flag log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) if Debug { // enable debug output globally diff --git a/cmd/session/init.go b/cmd/session/init.go index 57343e7e..aee2210a 100644 --- a/cmd/session/init.go +++ b/cmd/session/init.go @@ -24,6 +24,7 @@ func init() { root.SessionCmd.AddCommand(SessionStopCmd) root.SessionCmd.AddCommand(SessionStatusCmd) root.SessionCmd.AddCommand(SessionSummaryCmd) + root.SessionCmd.AddCommand(SessionImportCmd) // Session start flags // TODO need a quick simulation environment flag diff --git a/cmd/session/session_import.go b/cmd/session/session_import.go new file mode 100644 index 00000000..71fea14d --- /dev/null +++ b/cmd/session/session_import.go @@ -0,0 +1,61 @@ +package session + +import ( + "fmt" + "os" + + root "github.com/Cray-HPE/cani/cmd" + "github.com/Cray-HPE/cani/internal/domain" + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" +) + +// SessionImportCmd represents the session import command +var SessionImportCmd = &cobra.Command{ + Use: "import", + Short: "TODO THIS IS JUST A SHIM COMMAND", + Long: `TODO THIS IS JUST A SHIM COMMAND`, + SilenceUsage: true, // Errors are more important than the usage + PersistentPreRunE: DatastoreExists, // A session must be active to write to a datastore + RunE: importSession, + // PersistentPostRunE: writeSession, +} + +// importSession imports the external inventory into the session datastore +func importSession(cmd *cobra.Command, args []string) error { + // Setup profiling + // f, err := os.Create("cpu_profile") + // if err != nil { + // panic(err) + // } + + // pprof.StartCPUProfile(f) + // defer pprof.StopCPUProfile() + + ds := root.Conf.Session.DomainOptions.DatastorePath + providerName := root.Conf.Session.DomainOptions.Provider + d, err := domain.New(root.Conf.Session.DomainOptions) + if err != nil { + return err + } + + if root.Conf.Session.Active { + // Check that the datastore exists before proceeding since we cannot continue without it + _, err := os.Stat(ds) + if err != nil { + return fmt.Errorf("Session is ACTIVE with provider '%s' but datastore '%s' does not exist", providerName, ds) + } + log.Info().Msgf("Session is ACTIVE") + } else { + log.Info().Msgf("Session with provider '%s' and datastore '%s' is STOPPED", providerName, ds) + } + + log.Info().Msgf("Committing changes to session") + + // Commit the external inventory + if err := d.Import(cmd.Context()); err != nil { + return err + } + + return nil +} diff --git a/cmd/session/session_start.go b/cmd/session/session_start.go index c3c556d3..7a165808 100644 --- a/cmd/session/session_start.go +++ b/cmd/session/session_start.go @@ -96,7 +96,7 @@ func startSession(cmd *cobra.Command, args []string) error { } // Validate the external inventory - result, err := root.Conf.Session.Domain.Validate(cmd.Context()) + result, err := root.Conf.Session.Domain.Validate(cmd.Context(), false) if errors.Is(err, provider.ErrDataValidationFailure) { // TODO the following should probably suggest commands to fix the issue?
log.Error().Msgf("Inventory data validation errors encountered") diff --git a/cmd/validate.go b/cmd/validate.go index 553f3ea8..71706212 100644 --- a/cmd/validate.go +++ b/cmd/validate.go @@ -51,7 +51,7 @@ func validateInventory(cmd *cobra.Command, args []string) error { return err } // Validate the external inventory - result, err := d.Validate(cmd.Context()) + result, err := d.Validate(cmd.Context(), true) if errors.Is(err, provider.ErrDataValidationFailure) { // TODO the following should probably suggest commands to fix the issue? log.Error().Msgf("Inventory data validation errors encountered") diff --git a/go.mod b/go.mod index 1e7a623e..e44f5b2c 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.20 require ( github.com/Cray-HPE/hms-sls/v2 v2.1.0 - github.com/Cray-HPE/hms-xname v1.1.0 + github.com/Cray-HPE/hms-xname v1.1.1-0.20230602152417-25bcdeda83c9 github.com/antihax/optional v1.0.0 github.com/fatih/color v1.13.0 github.com/google/uuid v1.3.0 @@ -15,6 +15,7 @@ require ( github.com/santhosh-tekuri/jsonschema/v5 v5.3.0 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.5.1 golang.org/x/oauth2 v0.8.0 gopkg.in/yaml.v3 v3.0.1 inet.af/netaddr v0.0.0-20220617031823-097006376321 @@ -23,6 +24,7 @@ require ( require ( github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -31,6 +33,7 @@ require ( github.com/kr/pretty v0.3.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.18 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect go4.org/intern v0.0.0-20220617035311-6925f38cc365 // indirect @@ -40,4 +43,5 @@ require ( google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v2 v2.2.7 // indirect ) diff --git a/go.sum b/go.sum index cf4d3e2e..6cca9fb5 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,9 @@ github.com/Cray-HPE/hms-base/v2 v2.0.1/go.mod h1:Mq+Ao3q4YtNZJZ1ly9wnEIKyvc3+QaA github.com/Cray-HPE/hms-s3 v1.9.2/go.mod h1:p5pVsMDeOdXKssd9qyFtXo4ztrn2lhD04nrO8+NOi7g= github.com/Cray-HPE/hms-sls/v2 v2.1.0 h1:NSHlsITIA2pXajnG4kuoZz59+StYWKSLhDb7lkmYqa0= github.com/Cray-HPE/hms-sls/v2 v2.1.0/go.mod h1:+MSzZViGwiVaip5pZhbrtFTv/8uTXeGrjSnsJMlU8uw= -github.com/Cray-HPE/hms-xname v1.1.0 h1:y/JEM2PbBaD8VbU7u85+/YIugf1Evji59z5IeSffeO8= github.com/Cray-HPE/hms-xname v1.1.0/go.mod h1:3A70QF0ddmkt/nz0jis5o8UIB4zAmsgsUiN71dr97n4= +github.com/Cray-HPE/hms-xname v1.1.1-0.20230602152417-25bcdeda83c9 h1:mL9LycLicBSk0ROgwqqTy+CQOLME5RDH0mb/7BW0ib8= +github.com/Cray-HPE/hms-xname v1.1.1-0.20230602152417-25bcdeda83c9/go.mod h1:XKdjQSzoTps5KDOE8yWojBTAWASGaS6LfRrVDxwTQO8= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg= @@ -77,6 +78,7 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 
github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.0.0-20200620013148-b91950f658ec/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ -281,6 +283,7 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -318,6 +321,7 @@ github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= @@ -647,6 +651,7 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/domain/blade.go b/internal/domain/blade.go index 36511401..c14939c9 100644 --- a/internal/domain/blade.go +++ b/internal/domain/blade.go @@ -120,11 +120,12 @@ func (d *Domain) AddBlade(ctx context.Context, deviceTypeSlug string, cabinetOrd locationOrdinal := hardwareBuildOut.OrdinalPath[len(hardwareBuildOut.OrdinalPath)-1] hardware := inventory.Hardware{ - ID: hardwareBuildOut.ID, - Parent: hardwareBuildOut.ParentID, - Type: hardwareBuildOut.DeviceType.HardwareType, - Vendor: hardwareBuildOut.DeviceType.Manufacturer, - Model: hardwareBuildOut.DeviceType.Model, + ID: hardwareBuildOut.ID, + Parent: hardwareBuildOut.ParentID, + Type: hardwareBuildOut.DeviceType.HardwareType, + DeviceTypeSlug: 
hardwareBuildOut.DeviceType.Slug, + Vendor: hardwareBuildOut.DeviceType.Manufacturer, + Model: hardwareBuildOut.DeviceType.Model, LocationOrdinal: &locationOrdinal, diff --git a/internal/domain/cabinet.go b/internal/domain/cabinet.go index 53add56a..046399e3 100644 --- a/internal/domain/cabinet.go +++ b/internal/domain/cabinet.go @@ -13,7 +13,7 @@ import ( ) // AddCabinet adds a cabinet to the inventory -func (d *Domain) AddCabinet(ctx context.Context, deviceTypeSlug string, cabinetOrdinal int) (AddHardwareResult, error) { +func (d *Domain) AddCabinet(ctx context.Context, deviceTypeSlug string, cabinetOrdinal int, metadata map[string]interface{}) (AddHardwareResult, error) { // Validate provided cabinet exists // Craft the path to the cabinet cabinetLocationPath := inventory.LocationPath{ @@ -73,17 +73,23 @@ func (d *Domain) AddCabinet(ctx context.Context, deviceTypeSlug string, cabinetO locationOrdinal := hardwareBuildOut.OrdinalPath[len(hardwareBuildOut.OrdinalPath)-1] hardware := inventory.Hardware{ - ID: hardwareBuildOut.ID, - Parent: hardwareBuildOut.ParentID, - Type: hardwareBuildOut.DeviceType.HardwareType, - Vendor: hardwareBuildOut.DeviceType.Manufacturer, - Model: hardwareBuildOut.DeviceType.Model, + ID: hardwareBuildOut.ID, + Parent: hardwareBuildOut.ParentID, + Type: hardwareBuildOut.DeviceType.HardwareType, + DeviceTypeSlug: hardwareBuildOut.DeviceType.Slug, + Vendor: hardwareBuildOut.DeviceType.Manufacturer, + Model: hardwareBuildOut.DeviceType.Model, LocationOrdinal: &locationOrdinal, Status: inventory.HardwareStatusStaged, } + // Ask the inventory provider to craft a metadata object for this information + if err := d.externalInventoryProvider.BuildHardwareMetadata(&hardware, metadata); err != nil { + return AddHardwareResult{}, err + } + log.Debug().Any("id", hardware.ID).Msg("Hardware") log.Debug().Str("path", hardwareBuildOut.LocationPathString()).Msg("Hardware Build out") diff --git a/internal/domain/domain.go b/internal/domain/domain.go index 1a9de811..91c796dc 100644 --- a/internal/domain/domain.go +++ b/internal/domain/domain.go @@ -63,7 +63,7 @@ func New(opts *NewOpts) (*Domain, error) { // Determine which external inventory provider to use switch inventoryProvider { case inventory.CSMProvider: - domain.externalInventoryProvider, err = csm.New(&opts.CsmOptions) + domain.externalInventoryProvider, err = csm.New(&opts.CsmOptions, domain.hardwareTypeLibrary) if err != nil { return nil, errors.Join( fmt.Errorf("failed to initialize CSM external inventory provider"), diff --git a/internal/domain/import.go b/internal/domain/import.go new file mode 100644 index 00000000..daaa3839 --- /dev/null +++ b/internal/domain/import.go @@ -0,0 +1,9 @@ +package domain + +import ( + "context" +) + +func (d *Domain) Import(ctx context.Context) error { + return d.externalInventoryProvider.Import(ctx, d.datastore) +} diff --git a/internal/domain/misc.go b/internal/domain/misc.go index 741ed51d..a52cb325 100644 --- a/internal/domain/misc.go +++ b/internal/domain/misc.go @@ -25,12 +25,12 @@ type ValidateResult struct { ProviderValidationErrors map[uuid.UUID]provider.HardwareValidationResult } -func (d *Domain) Validate(ctx context.Context) (ValidateResult, error) { +func (d *Domain) Validate(ctx context.Context, checkRequiredData bool) (ValidateResult, error) { var result ValidateResult // Validate the current state of CANI's inventory data against the provider plugin // for provider specific data. 
- if failedValidations, err := d.externalInventoryProvider.ValidateInternal(ctx, d.datastore, true); len(failedValidations) > 0 { + if failedValidations, err := d.externalInventoryProvider.ValidateInternal(ctx, d.datastore, checkRequiredData); len(failedValidations) > 0 { result.ProviderValidationErrors = failedValidations return result, provider.ErrDataValidationFailure } else if err != nil { diff --git a/internal/inventory/datastore.go b/internal/inventory/datastore.go index 0cd6c628..1e514305 100644 --- a/internal/inventory/datastore.go +++ b/internal/inventory/datastore.go @@ -34,4 +34,10 @@ type Datastore interface { GetSystem(hardware Hardware) (Hardware, error) // Not yet implemented until multiple systems are supported // TODO for search properties + + // Clone creates an in-memory version of the datastore to perform location operations + Clone() (Datastore, error) + + // Merge the contents of the remote datastore (most likely an in-memory one with changes) + Merge(Datastore) error } diff --git a/internal/inventory/datastore_json.go b/internal/inventory/datastore_json.go index cc17f70b..9000f475 100644 --- a/internal/inventory/datastore_json.go +++ b/internal/inventory/datastore_json.go @@ -25,7 +25,7 @@ type DatastoreJSON struct { logFilePath string } -func NewDatastoreJSON(dataFilePath string, logfilepath string, provider Provider) (*DatastoreJSON, error) { +func NewDatastoreJSON(dataFilePath string, logfilepath string, provider Provider) (Datastore, error) { datastore := &DatastoreJSON{ dataFilePath: dataFilePath, logFilePath: logfilepath, @@ -105,6 +105,31 @@ func NewDatastoreJSON(dataFilePath string, logfilepath string, provider Provider return datastore, nil } +func NewDatastoreInMemory(provider Provider) (*DatastoreJSON, error) { + datastore := &DatastoreJSON{} + + // Generate a UUID for a new top-level "System" object + system := uuid.New() + // A system ordinal is required for the top-level system object and is arbitrarily set to 0 + ordinal := 0 + // Create a config with default values since one does not exist + datastore.inventory = &Inventory{ + SchemaVersion: SchemaVersionV1Alpha1, + Provider: provider, + Hardware: map[uuid.UUID]Hardware{ + // NOTE: At present, we only allow ONE system in the inventory, but leaving the door open for multiple systems + system: { + Type: hardwaretypes.System, // The top-level object is a hardwaretypes.System + ID: system, // ID is the same as the key for the top-level system object to prevent a uuid.Nil + Parent: uuid.Nil, // Parent should be nil to prevent illegitimate children + LocationOrdinal: &ordinal, + }, + }, + } + + return datastore, nil +} + func (dj *DatastoreJSON) GetSchemaVersion() (SchemaVersion, error) { dj.inventoryLock.RLock() defer dj.inventoryLock.RUnlock() @@ -132,6 +157,11 @@ func (dj *DatastoreJSON) InventoryProvider() (Provider, error) { // Flush writes the current inventory to the datastore func (dj *DatastoreJSON) Flush() error { + if dj.dataFilePath == "" { + // If running in in-memory mode there is nothing to flush + return nil + } + dj.inventoryLock.RLock() defer dj.inventoryLock.RUnlock() @@ -150,6 +180,62 @@ func (dj *DatastoreJSON) Flush() error { return nil }
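The Clone/Merge pair added above is intended to be used along these lines (a sketch, not code from this patch; applyChanges and the mutation step are illustrative, and the snippet assumes it sits in the inventory package):

// Stage changes against a cheap in-memory copy, then merge back and
// persist only if everything succeeded.
func applyChanges(ds Datastore) error {
	temp, err := ds.Clone() // in-memory copy; Flush and logTransaction are no-ops on it
	if err != nil {
		return err
	}

	// ... perform Add/Update/Remove calls against temp ...

	if err := ds.Merge(temp); err != nil { // deletes what temp dropped, upserts the rest
		return err
	}
	return ds.Flush() // write the merged inventory to disk
}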
+func (dj *DatastoreJSON) Clone() (Datastore, error) { + dj.inventoryLock.RLock() + defer dj.inventoryLock.RUnlock() + + result, err := NewDatastoreInMemory(dj.inventory.Provider) + if err != nil { + return nil, errors.Join(fmt.Errorf("failed to create in memory datastore"), err) + } + + // Deep copy the hardware information into the datastore + // TODO this is a hack + raw, err := json.Marshal(dj.inventory.Hardware) + if err != nil { + return nil, err + } + result.inventory.Hardware = nil + if err := json.Unmarshal(raw, &result.inventory.Hardware); err != nil { + return nil, err + } + + if err := result.calculateDerivedFields(); err != nil { + return nil, err + } + + return result, nil +} + +func (dj *DatastoreJSON) Merge(otherDJ Datastore) error { + dj.inventoryLock.Lock() + defer dj.inventoryLock.Unlock() + + otherAllHardware, err := otherDJ.List() + if err != nil { + return errors.Join(fmt.Errorf("failed to retrieve inventory from other datastore"), err) + } + + // Identify hardware to remove + hardwareToDelete := []uuid.UUID{} + for id := range dj.inventory.Hardware { + if _, exists := otherAllHardware.Hardware[id]; !exists { + hardwareToDelete = append(hardwareToDelete, id) + } + } + // Remove deleted hardware + for _, id := range hardwareToDelete { + delete(dj.inventory.Hardware, id) + } + + // Update or add hardware + for id, otherHardware := range otherAllHardware.Hardware { + dj.inventory.Hardware[id] = otherHardware + } + + return nil +} + func (dj *DatastoreJSON) calculateDerivedFields() (err error) { // // Update location path @@ -309,7 +395,7 @@ func (dj *DatastoreJSON) Update(hardware *Hardware) error { dj.logTransaction("UPDATE", hardware.ID.String(), nil, nil) // Update derived fields if the parent ID is different than the old value - if oldHardware.Parent != hardware.ID { + if oldHardware.Parent != hardware.Parent { log.Debug().Msgf("Detected parent ID change for (%s) from (%s) to (%s)", hardware.ID, oldHardware.Parent, hardware.Parent) if err := dj.calculateDerivedFields(); err != nil { return errors.Join( @@ -415,16 +501,28 @@ func (dj *DatastoreJSON) getLocation(hardware Hardware) (LocationPath, error) { } // Build up an element in the location path. - // Since the tree is being traversed bottom up, need to add each location token to the front of the slice - locationPath = append([]LocationToken{{ + locationPath = append(locationPath, LocationToken{ HardwareType: currentHardware.Type, Ordinal: *currentHardware.LocationOrdinal, - }}, locationPath...) + }) // Go to the parent node next currentHardwareID = currentHardware.Parent } + // Reverse in place, since the tree was traversed bottom up + // This is more efficient than prepending to the location path, since it avoids + // a lot of memory allocations and slice shuffling from adding a new element + // to the start of the slice every time we visit a new location. + // + // For loop + // Initial condition: Set i to the beginning, and j to the end.
+ // Check: Continue if i is before j + // Advance: Move i forward, and j backward + for i, j := 0, len(locationPath)-1; i < j; i, j = i+1, j-1 { + locationPath[j], locationPath[i] = locationPath[i], locationPath[j] + } + return locationPath, nil } @@ -438,7 +536,7 @@ func (dj *DatastoreJSON) GetAtLocation(path LocationPath) (Hardware, error) { if len(path) == 0 { return Hardware{}, ErrEmptyLocationPath } - // log.Debug().Msgf("Getting %s", path.String()) + log.Trace().Msgf("GetAtLocation: Location Path: %s", path.String()) // // Traverse the tree to see if the hardware exists at the given location @@ -459,12 +557,13 @@ // Visit rest of the path for i, locationToken := range path[1:] { - // log.Debug().Msgf("GetAtLocation: Processing token %d of %d: '%s'", i+1, len(path), locationToken.String()) - // log.Debug().Msgf("GetAtLocation: Current ID %s", currentHardware.ID) + log.Trace().Msgf("GetAtLocation: Processing token %d of %d: '%s'", i+1, len(path), locationToken.String()) + log.Trace().Msgf("GetAtLocation: Current ID %s", currentHardware.ID) + // For each child of the current hardware object check to see if it matches the location token foundMatch := false for _, childID := range currentHardware.Children { - // log.Debug().Msgf("GetAtLocation: Visiting Child (%s)", childID) + log.Trace().Msgf("GetAtLocation: Visiting Child (%s)", childID) // Get the hardware childHardware, ok := dj.inventory.Hardware[childID] if !ok { @@ -476,15 +575,15 @@ } if childHardware.LocationOrdinal == nil { - // log.Debug().Msgf("GetAtLocation: Child has no location ordinal set, skipping") + log.Trace().Msgf("GetAtLocation: Child has no location ordinal set, skipping") continue } - // log.Debug().Msgf("GetAtLocation: Child location token: %s:%d", childHardware.Type, *childHardware.LocationOrdinal) + log.Trace().Msgf("GetAtLocation: Child location token: %s:%d", childHardware.Type, *childHardware.LocationOrdinal) // Check to see if the location token matches if childHardware.Type == locationToken.HardwareType && *childHardware.LocationOrdinal == locationToken.Ordinal { // Found a match!
- // log.Debug().Msgf("GetAtLocation: Child has matching location token") + log.Trace().Msgf("GetAtLocation: Child has matching location token") currentHardware = childHardware foundMatch = true break @@ -547,6 +646,11 @@ func (dj *DatastoreJSON) getChildren(id uuid.UUID) ([]Hardware, error) { // logTransaction logs a transaction to logger func (dj *DatastoreJSON) logTransaction(operation string, key string, value interface{}, err error) { + if dj.dataFilePath == "" { + // If running in in-memory mode there is currently no place to log + return + } + tl, err = os.OpenFile( dj.logFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, diff --git a/internal/inventory/model.go b/internal/inventory/model.go index 86661390..b7af020d 100644 --- a/internal/inventory/model.go +++ b/internal/inventory/model.go @@ -23,11 +23,12 @@ type Hardware struct { ID uuid.UUID Name string `json:"Name,omitempty" yaml:"Name,omitempty" default:"" usage:"Friendly name"` Type hardwaretypes.HardwareType `json:"Type,omitempty" yaml:"Type,omitempty" default:"" usage:"Type"` + DeviceTypeSlug string `json:"DeviceTypeSlug,omitempty" yaml:"DeviceTypeSlug,omitempty" default:"" usage:"Hardware Type Library Device slug"` Vendor string `json:"Vendor,omitempty" yaml:"Vendor,omitempty" default:"" usage:"Vendor"` Architecture string `json:"Architecture,omitempty" yaml:"Architecture,omitempty" default:"" usage:"Architecture"` Model string `json:"Model,omitempty" yaml:"Model,omitempty" default:"" usage:"Model"` Status HardwareStatus `json:"Status,omitempty" yaml:"Status,omitempty" default:"Staged" usage:"Hardware can be [staged, provisioned, decommissioned]"` - Properties interface{} `json:"Properties,omitempty" yaml:"Properties,omitempty" default:"" usage:"Properties"` + Properties map[string]interface{} `json:"Properties,omitempty" yaml:"Properties,omitempty" default:"" usage:"Properties"` Role string `json:"Role,omitempty" yaml:"Role,omitempty" default:"" usage:"Role"` SubRole string `json:"SubRole,omitempty" yaml:"SubRole,omitempty" default:"" usage:"SubRole"` Alias string `json:"Alias,omitempty" yaml:"Alias,omitempty" default:"" usage:"Alias"` @@ -54,9 +55,10 @@ type Provider string const ( // Define constants for lifecycle states - HardwareStatusStaged = HardwareStatus("staged") - HardwareStatusProvisioned = HardwareStatus("provisioned") - HardwareStatusDecomissioned = HardwareStatus("decomissioned") + HardwareStatusEmpty = HardwareStatus("empty") + HardwareStatusStaged = HardwareStatus("staged") + HardwareStatusProvisioned = HardwareStatus("provisioned") + HardwareStatusDecommissioned = HardwareStatus("decommissioned") // Schema and provider names are constant SchemaVersionV1Alpha1 = SchemaVersion("v1alpha1") CSMProvider = Provider("csm")
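To illustrate the two model changes (the new DeviceTypeSlug field and the typed Properties map), here is what a populated record could look like; the slug value is hypothetical, and the snippet assumes it sits in the inventory package:

// An example Hardware value using the new fields.
func exampleHardware() Hardware {
	ordinal := 0
	return Hardware{
		ID:              uuid.New(),
		Type:            hardwaretypes.NodeBlade,
		DeviceTypeSlug:  "hpe-crayex-ex425-compute-blade", // hypothetical slug from the hardware type library
		Vendor:          "HPE",
		Status:          HardwareStatusStaged,
		Properties:      map[string]interface{}{}, // now a typed map instead of interface{}
		LocationOrdinal: &ordinal,
	}
}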
diff --git a/internal/provider/csm/csm.go b/internal/provider/csm/csm.go index a2016763..37951270 100644 --- a/internal/provider/csm/csm.go +++ b/internal/provider/csm/csm.go @@ -1,15 +1,13 @@ package csm import ( - "context" "fmt" "github.com/Cray-HPE/cani/cmd/taxonomy" - "github.com/Cray-HPE/cani/internal/inventory" "github.com/Cray-HPE/cani/pkg/hardwaretypes" + hsm_client "github.com/Cray-HPE/cani/pkg/hsm-client" sls_client "github.com/Cray-HPE/cani/pkg/sls-client" - "github.com/mitchellh/mapstructure" ) type NewOpts struct { @@ -56,10 +54,15 @@ type CSM struct { // System Configuration data ValidRoles []string ValidSubRoles []string + + hardwareLibrary *hardwaretypes.Library } -func New(opts *NewOpts) (*CSM, error) { - csm := &CSM{} +func New(opts *NewOpts, hardwareLibrary *hardwaretypes.Library) (*CSM, error) { + csm := &CSM{ + hardwareLibrary: hardwareLibrary, + } + // Setup HTTP client and context using csm options httpClient, _, err := opts.newClient() if err != nil { @@ -108,65 +111,3 @@ func New(opts *NewOpts) (*CSM, error) { csm.ValidSubRoles = opts.ValidSubRoles return csm, nil } - -// Import external inventory data into CANI's inventory format -func (csm *CSM) Import(ctx context.Context, datastore inventory.Datastore) error { - return fmt.Errorf("todo") - -} - -func (csm *CSM) BuildHardwareMetadata(cHardware *inventory.Hardware, rawProperties map[string]interface{}) error { - if cHardware.ProviderProperties == nil { - cHardware.ProviderProperties = map[string]interface{}{} - } - - switch cHardware.Type { - case hardwaretypes.Node: - // TODO do something interesting with the raw data, and convert it/validate it - properties := NodeMetadata{} // Create an empty one - if _, exists := cHardware.ProviderProperties["csm"]; exists { - // If one exists set it. - if err := mapstructure.Decode(cHardware.ProviderProperties["csm"], &properties); err != nil { - return err - } - } - // Make changes to the node metadata - // The keys of rawProperties need to match what is defined in ./cmd/node/update_node.go - if roleRaw, exists := rawProperties["role"]; exists { - if roleRaw == nil { - properties.Role = nil - } else { - properties.Role = StringPtr(roleRaw.(string)) - } - } - if subroleRaw, exists := rawProperties["subrole"]; exists { - if subroleRaw == nil { - properties.SubRole = nil - } else { - properties.SubRole = StringPtr(subroleRaw.(string)) - } - } - if nidRaw, exists := rawProperties["nid"]; exists { - if nidRaw == nil { - properties.Nid = nil - } else { - properties.Nid = IntPtr(nidRaw.(int)) - } - } - if aliasRaw, exists := rawProperties["alias"]; exists { - if aliasRaw == nil { - properties.Alias = nil - } else { - properties.Alias = StringPtr(aliasRaw.(string)) - } - } - - cHardware.ProviderProperties["csm"] = properties - - return nil - default: - // This hardware type doesn't have metadata for it right now - return nil - } - -} diff --git a/internal/provider/csm/import.go b/internal/provider/csm/import.go new file mode 100644 index 00000000..b04b8ece --- /dev/null +++ b/internal/provider/csm/import.go @@ -0,0 +1,863 @@ +package csm + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "reflect" + "regexp" + "sort" + "strconv" + "time" + + "github.com/Cray-HPE/cani/internal/inventory" + "github.com/Cray-HPE/cani/internal/provider/csm/sls" + "github.com/Cray-HPE/cani/pkg/hardwaretypes" + hsm_client "github.com/Cray-HPE/cani/pkg/hsm-client" + sls_client "github.com/Cray-HPE/cani/pkg/sls-client" + "github.com/Cray-HPE/hms-xname/xnames" + "github.com/Cray-HPE/hms-xname/xnametypes" + "github.com/google/uuid" + "github.com/rs/zerolog/log" +) + +// +// Other thoughts +// +// +// 1. Create a top level system object in SLS, this will serve as the main place to store the CANI metadata +// - Last time it was imported by CANI +// - Version of CANI when the last import occurred +// - SLS CANI Schema Level +// 2. For missing hardware in SLS like Mountain Cabinet RouterBMCs, add them from HSM state +// 3. Each hardware object should have the UUID of the associated CANI Hardware UUID that it is associated to. Hopefully this will be 1-to-1 +// 4. If hardware is added to SLS without the special CANI metadata it can be detected as being added outside the normal process +// 5.
For hardware that doesn't exist in mountain cabinets (phantom nodes) either we mark things as absent as a CANI state (here is the logical data, but no physical data) + // or outright remove them, but that will break existing procedures. + +func loadJSON(path string, dest interface{}) error { + raw, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + return json.Unmarshal(raw, dest) +} + +func (csm *CSM) Import(ctx context.Context, datastore inventory.Datastore) error { + + // + // Retrieve current state from the system + // + slsDumpstate, _, err := csm.slsClient.DumpstateApi.DumpstateGet(ctx) + if err != nil { + return errors.Join(fmt.Errorf("failed to perform SLS dumpstate"), err) + } + + hsmStateComponents, _, err := csm.hsmClient.ComponentApi.DoComponentsGet(ctx, nil) + if err != nil { + return errors.Join(fmt.Errorf("failed to retrieve HSM State Components"), err) + } + + hsmHardwareInventory, _, err := csm.hsmClient.HWInventoryByLocationApi.DoHWInvByLocationGetAll(ctx, nil) + if err != nil { + return errors.Join(fmt.Errorf("failed to retrieve HSM Hardware Inventory"), err) + } + + // + // HSM lookup tables + // + hsmStateComponentsMap := map[string]hsm_client.Component100Component{} + for _, hsmComponent := range hsmStateComponents.Components { + hsmStateComponentsMap[hsmComponent.ID] = hsmComponent + } + hsmHardwareInventoryMap := map[string]hsm_client.HwInventory100HwInventoryByLocation{} + for _, hsmHardware := range hsmHardwareInventory { + hsmHardwareInventoryMap[hsmHardware.ID] = hsmHardware + } + + tempDatastore, err := datastore.Clone() + if err != nil { + return errors.Join(fmt.Errorf("failed to clone datastore"), err) + } + + // Prune non-mountain hardware + slsDumpstate.Hardware, _ = sls.FilterHardware(slsDumpstate.Hardware, func(hardware sls_client.Hardware) (bool, error) { + return hardware.Class != sls_client.HardwareClassRiver, nil + }) + + // Get the system UUID + cSystem, err := tempDatastore.GetSystemZero() + if err != nil { + return errors.Join(fmt.Errorf("failed to get system:0 ID"), err) + } + + // Import related changes for SLS + // slsHardwareToAdd := map[string]sls_client.Hardware{} + slsHardwareToModify := map[string]sls_client.Hardware{} + // slsHardwareExists := map[string]sls_client.Hardware{} + + // CANI Hardware changes + // TODO + + // TODO Unable to POST this into SLS + // Check to see if a system object exists in the SLS dumpstate + // slsSystem, exists := slsDumpstate.Hardware["s0"] + // if !exists { + // log.Warn().Msgf("SLS does not contain a system object, creating one") + // + // slsSystem = sls.NewHardware(xnames.System{}, sls_client.HardwareClassRiver, sls_client.HardwareExtraPropertiesSystem{ + // CaniId: cSystem.ID.String(), + // CaniSlsSchemaVersion: "v1alpha1", // TODO make this an enum + // CaniLastModified: time.Now().UTC().String(), + // }) + + // slsHardwareToAdd[slsSystem.Xname] = slsSystem + // } + + // log.Info().Msgf("System: %v", slsSystem) + + // + // Import Cabinets and Chassis + // + allCabinets, _ := sls.FilterHardwareByType(slsDumpstate.Hardware, xnametypes.Cabinet) + allChassis, _ := sls.FilterHardwareByType(slsDumpstate.Hardware, xnametypes.Chassis) + allChassisBMCs, _ := sls.FilterHardwareByType(slsDumpstate.Hardware, xnametypes.ChassisBMC) + + // Find all cabinets and what chassis they have + cabinetChassisCounts := map[string][]int{} + for _, chassis := range allChassis { + chassisXname := xnames.FromStringToStruct[xnames.Chassis](chassis.Xname) + cabinet := chassisXname.Parent() + +
cabinetChassisCounts[cabinet.String()] = append(cabinetChassisCounts[cabinet.String()], chassisXname.Chassis) + } + for cabinet, chassisOrdinals := range cabinetChassisCounts { + sort.Ints(chassisOrdinals) + log.Debug().Msgf("%s: %v - %v", cabinet, len(chassisOrdinals), chassisOrdinals) + } + + // Find all cabinets and build up HMN VLAN Mappings + cabinetHMNVlans := map[string]int{} + cabinetSubnetRegex := regexp.MustCompile(`cabinet_(\d+)`) + + // The networking data in SLS should be considered the source of truth for networking information + // instead of looking at the SLS hardware part of SLS + for _, networkName := range []string{"HMN_MTN", "HMN_RVR"} { + network, exists := slsDumpstate.Networks[networkName] + if !exists { + log.Warn().Msgf("SLS Network (%s) does not exist", networkName) + continue + } + + for _, subnet := range network.ExtraProperties.Subnets { + matches := cabinetSubnetRegex.FindStringSubmatch(subnet.Name) + if len(matches) != 2 { + log.Warn().Msgf("Skipping subnet (%s) in network (%s) for cabinet HMN Vlan lookup", subnet.Name, networkName) + continue + } + + cabinetXname := xnames.Cabinet{} + cabinetXname.Cabinet, err = strconv.Atoi(matches[1]) + if err != nil { + return errors.Join(fmt.Errorf("failed to extract cabinet number from subnet (%s)", subnet.Name), err) + } + + cabinetHMNVlans[cabinetXname.String()] = int(subnet.VlanID) + } + } + + for cabinet, vlan := range cabinetHMNVlans { + log.Debug().Msgf("Cabinet (%s) has HMN VLAN (%d)", cabinet, vlan) + } + + for _, slsCabinet := range allCabinets { + cabinetXname := xnames.FromStringToStruct[xnames.Cabinet](slsCabinet.Xname) + if cabinetXname == nil { + return fmt.Errorf("failed to parse cabinet xname (%s)", slsCabinet.Xname) + } + + locationPath, err := FromXname(cabinetXname) + if err != nil { + return errors.Join(fmt.Errorf("failed to build location path for xname (%v)", cabinetXname), err) + } + + // + // Stage 1: Determine if the cabinet is new or already exists. If it doesn't exist, push the required hardware data into the datastore + // + cCabinet, err := tempDatastore.GetAtLocation(locationPath) + if err == nil { + // Cabinet exists + log.Info().Msgf("Cabinet %s (%v) exists in datastore with ID (%s)", cabinetXname, locationPath, cCabinet.ID) + + // TODO Build metadata from sls data + } else if errors.Is(err, inventory.ErrHardwareNotFound) { + // Cabinet does not exist, which means it needs to be added + // TODO When reconstituting the CANI inventory (say it was lost), should we reuse existing IDs?
+ log.Info().Msgf("Cabinet %s does not exist in datastore at %s", cabinetXname, locationPath) + + deviceTypeSlug := "" + + switch slsCabinet.Class { + case sls_client.HardwareClassRiver: + deviceTypeSlug = "hpe-eia-cabinet" + case sls_client.HardwareClassHill: + if reflect.DeepEqual(cabinetChassisCounts[cabinetXname.String()], []int{1, 3}) { + deviceTypeSlug = "hpe-ex2000" + } else if reflect.DeepEqual(cabinetChassisCounts[cabinetXname.String()], []int{0}) { + deviceTypeSlug = "hpe-ex2500-1-liquid-cooled-chassis" + } else if reflect.DeepEqual(cabinetChassisCounts[cabinetXname.String()], []int{0, 1}) { + deviceTypeSlug = "hpe-ex2500-2-liquid-cooled-chassis" + } else if reflect.DeepEqual(cabinetChassisCounts[cabinetXname.String()], []int{0, 1, 2}) { + deviceTypeSlug = "hpe-ex2500-3-liquid-cooled-chassis" + } + case sls_client.HardwareClassMountain: + if reflect.DeepEqual(cabinetChassisCounts[cabinetXname.String()], []int{0, 1, 2, 3, 4, 5, 6, 7}) { + deviceTypeSlug = "hpe-ex4000" // TODO This is ambiguous with the EX3000 cabinet, for right now assume + } + default: + return fmt.Errorf("cabinet (%s) has unknown class (%s)", cabinetXname, slsCabinet.Class) + } + + if deviceTypeSlug == "" { + log.Warn().Msgf("Cabinet %s device type slug is unknown, ignoring", cabinetXname.String()) + continue + } else { + log.Info().Msgf("Cabinet %s device type slug is %s", cabinetXname.String(), deviceTypeSlug) + } + + // Now its time to build up what the hardware looks like + newHardware, err := csm.buildInventoryHardware(deviceTypeSlug, cabinetXname.Cabinet, cSystem.ID, inventory.HardwareStatusProvisioned) + if err != nil { + return errors.Join(fmt.Errorf("failed to build hardware for cabinet (%s)", cabinetXname.String()), err) + } + + // Push the new hardware into the datastore + for _, cHardware := range newHardware { + log.Info().Msgf("Hardware from cabinet %s: %s", cabinetXname.String(), cHardware.ID) + if err := tempDatastore.Add(&cHardware); err != nil { + return fmt.Errorf("failed to add hardware (%s) to in memory datastore", cHardware.ID) + } + } + + // Set cabinet metadata + cabinetMetadata := CabinetMetadata{} + if vlan, exists := cabinetHMNVlans[slsCabinet.Xname]; exists { + cabinetMetadata.HMNVlan = IntPtr(vlan) + } + + cCabinet, err = tempDatastore.GetAtLocation(locationPath) + if err != nil { + return errors.Join(fmt.Errorf("failed to query datastore for %s", locationPath), err) + } + + cCabinet.ProviderProperties = map[string]interface{}{ + "csm": cabinetMetadata, + } + + if err := tempDatastore.Update(&cCabinet); err != nil { + return fmt.Errorf("failed to update hardware (%s) in memory datastore", cCabinet.ID) + } + + } else { + // Error occurred + return errors.Join(fmt.Errorf("failed to query datastore"), err) + } + + // Update SLS metadata + slsCabinetEP, err := sls.DecodeExtraProperties[sls_client.HardwareExtraPropertiesCabinet](slsCabinet) + if err != nil { + return fmt.Errorf("failed to decode SLS hardware extra properties (%s)", slsCabinet.Xname) + } + + if slsCabinetEP.CaniId != cCabinet.ID.String() { + if len(slsCabinetEP.CaniId) != 0 { + log.Warn().Msgf("Detected CANI hardware ID change from %s to %s for SLS Hardware %s", slsCabinetEP.CaniId, cCabinet.ID, slsCabinet.Xname) + } + + // Add in CANI properties + slsCabinetEP.CaniId = cCabinet.ID.String() + slsCabinetEP.CaniSlsSchemaVersion = "v1alpha1" // TODO make this a enum + slsCabinetEP.CaniLastModified = time.Now().UTC().String() + + log.Info().Msgf("SLS extra properties changed for %s", slsCabinet.Xname) + + 
+ + // + // Fix up Chassis SLS metadata + // + for _, slsHardware := range allChassis { + xname := xnames.FromString(slsHardware.Xname) + if xname == nil { + return fmt.Errorf("failed to parse xname (%s)", slsHardware.Xname) + } + + locationPath, err := FromXname(xname) + if err != nil { + return errors.Join(fmt.Errorf("failed to build location path for xname (%v)", xname), err) + } + + cHardware, err := tempDatastore.GetAtLocation(locationPath) + if err != nil { + return errors.Join(fmt.Errorf("failed to query datastore for %s", locationPath), err) + } + + // Update SLS metadata + slsEP, err := sls.DecodeExtraProperties[sls_client.HardwareExtraPropertiesChassis](slsHardware) + if err != nil { + return fmt.Errorf("failed to decode SLS hardware extra properties (%s)", slsHardware.Xname) + } + + if slsEP.CaniId != cHardware.ID.String() { + if len(slsEP.CaniId) != 0 { + log.Warn().Msgf("Detected CANI hardware ID change from %s to %s for SLS Hardware %s", slsEP.CaniId, cHardware.ID, slsHardware.Xname) + } + + // Add in CANI properties + slsEP.CaniId = cHardware.ID.String() + slsEP.CaniSlsSchemaVersion = "v1alpha1" // TODO make this an enum + slsEP.CaniLastModified = time.Now().UTC().String() + + log.Info().Msgf("SLS extra properties changed for %s", slsHardware.Xname) + + slsHardware.ExtraProperties = slsEP + slsHardwareToModify[slsHardware.Xname] = slsHardware + } + } + + // + // Fix up ChassisBMC SLS Metadata + // + for _, slsHardware := range allChassisBMCs { + xname := xnames.FromString(slsHardware.Xname) + if xname == nil { + return fmt.Errorf("failed to parse xname (%s)", slsHardware.Xname) + } + + locationPath, err := FromXname(xname) + if err != nil { + return errors.Join(fmt.Errorf("failed to build location path for xname (%v)", xname), err) + } + + cHardware, err := tempDatastore.GetAtLocation(locationPath) + if err != nil { + return errors.Join(fmt.Errorf("failed to query datastore for %s", locationPath), err) + } + + // Update SLS metadata + slsEP, err := sls.DecodeExtraProperties[sls_client.HardwareExtraPropertiesChassisBmc](slsHardware) + if err != nil { + return fmt.Errorf("failed to decode SLS hardware extra properties (%s)", slsHardware.Xname) + } + + if slsEP.CaniId != cHardware.ID.String() { + if len(slsEP.CaniId) != 0 { + log.Warn().Msgf("Detected CANI hardware ID change from %s to %s for SLS Hardware %s", slsEP.CaniId, cHardware.ID, slsHardware.Xname) + } + + // Add in CANI properties + slsEP.CaniId = cHardware.ID.String() + slsEP.CaniSlsSchemaVersion = "v1alpha1" // TODO make this an enum + slsEP.CaniLastModified = time.Now().UTC().String() + + log.Info().Msgf("SLS extra properties changed for %s", slsHardware.Xname) + + slsHardware.ExtraProperties = slsEP + slsHardwareToModify[slsHardware.Xname] = slsHardware + } + } + + // + // Import Nodes + // + allNodes, _ := sls.FilterHardwareByType(slsDumpstate.Hardware, xnametypes.Node) + + // 1.
Find all slots holding blades (either currently populated or could be populated) from SLS + slsNodeBladeXnames := []xnames.ComputeModule{} + slsNodeBladesFound := map[xnames.ComputeModule][]xnames.NodeBMC{} + slsNodeBMCFound := map[xnames.NodeBMC]bool{} + for _, slsNode := range allNodes { + nodeXname := xnames.FromStringToStruct[xnames.Node](slsNode.Xname) + if nodeXname == nil { + return fmt.Errorf("failed to parse node xname (%s)", slsNode.Xname) + } + + // Node -> NodeBMC (Node Card) -> ComputeModule (Node Blade) + nodeBMCXname := nodeXname.Parent() + nodeBladeXname := nodeBMCXname.Parent() + + if slsNodeBMCFound[nodeBMCXname] { + // We have already discovered this node BMC, and we don't need to add it again + continue + } + + // Keep track that we have seen this BMC + slsNodeBMCFound[nodeBMCXname] = true + + if _, exists := slsNodeBladesFound[nodeBladeXname]; !exists { + // This is the first time we have seen this blade, let's add it to our list of node blade xnames + slsNodeBladeXnames = append(slsNodeBladeXnames, nodeBladeXname) + } + + // Keep track that we found this node BMC on this blade + slsNodeBladesFound[nodeBladeXname] = append(slsNodeBladesFound[nodeBladeXname], nodeBMCXname) + } + + // 1.1 Sort the found node blade xnames, so the output is nice to look at + for _, nodeBMCs := range slsNodeBladesFound { + sort.Slice(nodeBMCs, func(i, j int) bool { + return nodeBMCs[i].String() < nodeBMCs[j].String() + }) + } + sort.Slice(slsNodeBladeXnames, func(i, j int) bool { + return slsNodeBladeXnames[i].String() < slsNodeBladeXnames[j].String() + }) + + // 2. Find all slots holding blades from HSM, and identify hardware + nodeBladeDeviceSlugs := map[xnames.ComputeModule]string{} + for _, nodeBladeXname := range slsNodeBladeXnames { + hsmComponent, exists := hsmStateComponentsMap[nodeBladeXname.String()] + if !exists { + log.Debug().Msgf("%s exists in SLS, but not HSM", nodeBladeXname) + + continue + } + + if hsmComponent.State != nil { + log.Debug().Msgf("%s exists in HSM with state %s", nodeBladeXname, *hsmComponent.State) + } + for _, nodeBMCXname := range slsNodeBladesFound[nodeBladeXname] { + // Don't need to do this if we already identified the blade + if _, exists := nodeBladeDeviceSlugs[nodeBladeXname]; exists { + continue + } + + // For every BMC in HSM there is a NodeEnclosure. The NodeEnclosure ordinal matches + // the BMC ordinal + nodeEnclosureXname := nodeBladeXname.NodeEnclosure(nodeBMCXname.NodeBMC) + + nodeEnclosure, exists := hsmHardwareInventoryMap[nodeEnclosureXname.String()] + if !exists { + log.Warn().Msgf("%s is missing from HSM hardware inventory, possible phantom hardware", nodeEnclosureXname) + continue // TODO what should happen here? + } + + if nodeEnclosure.PopulatedFRU == nil { + log.Warn().Msgf("%s is missing PopulatedFRU data", nodeEnclosureXname) + continue // TODO what should happen here? + } + + if nodeEnclosure.PopulatedFRU.HMSNodeEnclosureFRUInfo == nil { + log.Warn().Msgf("%s is missing PopulatedFRU node enclosure data", nodeEnclosureXname) + continue // TODO what should happen here?
+ } + nodeEnclosureFru := nodeEnclosure.PopulatedFRU.HMSNodeEnclosureFRUInfo + + log.Debug().Msgf("%s has manufacturer %s and model %s", nodeEnclosureXname, nodeEnclosureFru.Manufacturer, nodeEnclosureFru.Model) + + bladeDeviceSlug, err := csm.identifyDeviceSlug(nodeEnclosureFru.Manufacturer, nodeEnclosureFru.Model) + if err != nil { + log.Warn().Msgf("%s unable to determine blade device slug from Node Enclosure FRU data: %s", nodeEnclosureXname, err) + continue + } + + nodeBladeDeviceSlugs[nodeBladeXname] = bladeDeviceSlug + + log.Debug().Msgf("%s has blade device slug: %s", nodeBladeXname, bladeDeviceSlug) + } + + } + + // 3. Reconcile the identified node blades with the datastore + for nodeBladeXname, deviceSlug := range nodeBladeDeviceSlugs { + // Check to see if the node blade exists + + nodeBladeLocationPath, err := FromXname(nodeBladeXname) + if err != nil { + return errors.Join(fmt.Errorf("failed to build location path for xname (%v)", nodeBladeXname), err) + } + cNodeBlade, err := tempDatastore.GetAtLocation(nodeBladeLocationPath) + if err == nil { + // Blade currently exists + log.Debug().Msgf("Node blade %s (%v) exists in datastore with ID (%s)", nodeBladeXname, nodeBladeLocationPath, cNodeBlade.ID) + + // TODO Build metadata from sls data for merging + + } else if errors.Is(err, inventory.ErrHardwareNotFound) { + // Node blade does not exist + + // Determine the chassis ID + chassisLocationPath, err := FromXname(nodeBladeXname.Parent()) + if err != nil { + return errors.Join(fmt.Errorf("failed to build location path for xname (%v)", nodeBladeXname), err) + } + cChassis, err := tempDatastore.GetAtLocation(chassisLocationPath) + if err != nil { + return errors.Join(fmt.Errorf("failed to get datastore ID for %v", chassisLocationPath), err) + } + + // Now it's time to build up what the hardware looks like + newHardware, err := csm.buildInventoryHardware(deviceSlug, nodeBladeXname.ComputeModule, cChassis.ID, inventory.HardwareStatusProvisioned) + if err != nil { + return errors.Join(fmt.Errorf("failed to build hardware for node blade (%s)", nodeBladeXname.String()), err) + } + + // Push the new hardware into the datastore + for _, cHardware := range newHardware { + log.Debug().Msgf("Hardware from node blade %s: %s", nodeBladeXname.String(), cHardware.ID) + if err := tempDatastore.Add(&cHardware); err != nil { + return fmt.Errorf("failed to add hardware (%s) to in memory datastore", cHardware.ID) + } + } + + } else { + // Error occurred + return errors.Join(fmt.Errorf("failed to query datastore"), err) + } + } + + // Update node metadata in CANI and SLS + for _, slsNode := range allNodes { + nodeXname := xnames.FromString(slsNode.Xname) + if nodeXname == nil { + return fmt.Errorf("failed to parse node xname (%s)", slsNode.Xname) + } + + nodeLocationPath, err := FromXname(nodeXname) + if err != nil { + return errors.Join(fmt.Errorf("failed to build location path for xname (%v)", nodeXname), err) + } + + // + // Build up node extra properties for CANI + // + slsNodeEP, err := sls.DecodeExtraProperties[sls_client.HardwareExtraPropertiesNode](slsNode) + if err != nil { + return errors.Join(fmt.Errorf("failed to decode hardware extra properties for (%s)", slsNode.Xname), err) + } + + nodeMetadata := NodeMetadata{} + if slsNodeEP.Role != "" { + nodeMetadata.Role = StringPtr(slsNodeEP.Role) + } + + if slsNodeEP.SubRole != "" { + nodeMetadata.SubRole = StringPtr(slsNodeEP.SubRole) + } + + if slsNodeEP.NID != 0 { + nodeMetadata.Nid = IntPtr(int(slsNodeEP.NID)) + } + + if len(slsNodeEP.Aliases) != 0 { + nodeMetadata.Alias = slsNodeEP.Aliases + } + +
cNode, err := tempDatastore.GetAtLocation(nodeLocationPath) + if errors.Is(err, inventory.ErrHardwareNotFound) { + log.Warn().Msgf("Hardware does not exist (possible phantom hardware): %s", nodeLocationPath) + // This is a phantom node, and we need to push this into the inventory to preserve the logical information + // of the node + // TODO an interesting scenario to test with this would be a Bard Peak blade in a location that SLS assumes to be a Windom blade + + // The cabinet and chassis should exist + // cChassis, err := tempDatastore.GetAtLocation(nodeLocationPath[0:3]) + // if errors.Is(err, inventory.ErrHardwareNotFound) { + // return errors.Join(fmt.Errorf("failed to query datastore for %s", nodeLocationPath), err) + // } else if err != nil { + // return errors.Join(fmt.Errorf("chassis of phantom node (%s) does not exist in datastore", nodeLocationPath), err) + // } + + // // The Node Blade may not exist + // cNodeBlade, err := tempDatastore.GetAtLocation(nodeLocationPath[0:4]) + // if errors.Is(err, inventory.ErrHardwareNotFound) { + // // It doesn't exist, so lets create an empty one + // cNodeBlade = inventory.Hardware{ + // Parent: cChassis.ID, + // } + // tempDatastore.Add() + // } else if err != nil { + // return errors.Join(fmt.Errorf("failed to query datastore for %s", nodeLocationPath), err) + // } + + // // The Node Card may not exist + // nodeCardExists, err := nodeLocationPath[0:5].Exists(tempDatastore) + // if err != nil { + // return errors.Join(fmt.Errorf("failed to query datastore for %s", nodeLocationPath), err) + // } + + // log.Fatal().Msg("Panic!") + continue + } else if err != nil { + return errors.Join(fmt.Errorf("failed to query datastore for %s", nodeLocationPath), err) + } + + // Initialize the properties map if not done already + if cNode.ProviderProperties == nil { + cNode.ProviderProperties = map[string]interface{}{} + } + cNode.ProviderProperties["csm"] = nodeMetadata + + // Push updates into the datastore + if err := tempDatastore.Update(&cNode); err != nil { + return fmt.Errorf("failed to update hardware (%s) in memory datastore", cNode.ID) + } + + // + // Update SLS Extra Properties + // + if slsNodeEP.CaniId != cNode.ID.String() { + if len(slsNodeEP.CaniId) != 0 { + log.Warn().Msgf("Detected CANI hardware ID change from %s to %s for SLS Hardware %s", slsNodeEP.CaniId, cNode.ID, slsNode.Xname) + } + + // Update it if it has changed + slsNodeEP.CaniId = cNode.ID.String() + slsNodeEP.CaniSlsSchemaVersion = "v1alpha1" // TODO make this an enum + slsNodeEP.CaniLastModified = time.Now().UTC().String() + + slsNode.ExtraProperties = slsNodeEP + slsHardwareToModify[slsNode.Xname] = slsNode + + log.Debug().Msgf("SLS extra properties changed for %s", slsNode.Xname) + } + } + + // + // Import Router BMCs + // + // TODO + + // + // Handle phantom mountain/hill nodes + // + // TODO this might be better handled in the some code above + + // + // Push updates to SLS + // + if err := sls.HardwareUpdate(csm.slsClient, ctx, slsHardwareToModify, 10); err != nil { + return errors.Join(fmt.Errorf("failed to update hardware in SLS"), err) + } + + // TODO need a sls.HardwareCreate function + // for _, slsHardware := range slsHardwareToAdd { + // // Perform a POST against SLS + // + // _, r, err := csm.slsClient.HardwareApi.HardwarePost(ctx, sls.NewHardwarePostOpts( + // slsHardware, + // )) + // if err != nil { + // return errors.Join( + // fmt.Errorf("failed to add hardware (%s) to SLS", slsHardware.Xname), + // err, + // ) + // } + // log.Info().Int("status", r.StatusCode).Msg("Added hardware to SLS") + // }
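For the missing sls.HardwareCreate mentioned above, one possible shape (a sketch only: HardwareCreate does not exist yet, the client type name is assumed from the generated sls_client package, and the HardwarePost/NewHardwarePostOpts calls are taken from the commented-out code):

// HardwareCreate would live in the sls package next to HardwareUpdate and
// POST each new hardware object to SLS, wrapping failures with the xname.
func HardwareCreate(slsClient *sls_client.APIClient, ctx context.Context, hardwareToAdd map[string]sls_client.Hardware) error {
	for _, slsHardware := range hardwareToAdd {
		_, r, err := slsClient.HardwareApi.HardwarePost(ctx, NewHardwarePostOpts(slsHardware))
		if err != nil {
			return errors.Join(fmt.Errorf("failed to add hardware (%s) to SLS", slsHardware.Xname), err)
		}
		log.Info().Int("status", r.StatusCode).Msgf("Added hardware (%s) to SLS", slsHardware.Xname)
	}
	return nil
}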
+ + // Commit changes! + if err := datastore.Merge(tempDatastore); err != nil { + return errors.Join(fmt.Errorf("failed to merge temporary datastore with actual datastore"), err) + } + + return datastore.Flush() +} + +func (csm *CSM) buildInventoryHardware(deviceTypeSlug string, ordinal int, parentID uuid.UUID, status inventory.HardwareStatus) ([]inventory.Hardware, error) { + if csm.hardwareLibrary == nil { + panic("Hardware type library is nil") + } + + // Build up the expected hardware + // Generate a hardware build out using the given parent ID + hardwareBuildOutItems, err := csm.hardwareLibrary.GetDefaultHardwareBuildOut(deviceTypeSlug, ordinal, parentID) + if err != nil { + return nil, errors.Join( + fmt.Errorf("unable to build default hardware build out for %s", deviceTypeSlug), + err, + ) + } + + var allHardware []inventory.Hardware + for _, hardwareBuildOut := range hardwareBuildOutItems { + locationOrdinal := hardwareBuildOut.OrdinalPath[len(hardwareBuildOut.OrdinalPath)-1] + + allHardware = append(allHardware, inventory.Hardware{ + ID: hardwareBuildOut.ID, + Parent: hardwareBuildOut.ParentID, + Type: hardwareBuildOut.DeviceType.HardwareType, + DeviceTypeSlug: hardwareBuildOut.DeviceType.Slug, + Vendor: hardwareBuildOut.DeviceType.Manufacturer, + Model: hardwareBuildOut.DeviceType.Model, + + LocationOrdinal: &locationOrdinal, + + Status: status, // honor the caller-provided status + }) + + } + + return allHardware, nil +} + +func FromXname(xnameRaw xnames.Xname) (inventory.LocationPath, error) { + // TODO Look into go generating this + + switch xname := xnameRaw.(type) { + // System + case xnames.System: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + }, nil + case *xnames.System: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + }, nil + // Cabinet + case xnames.Cabinet: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: xname.Cabinet}, + }, nil + case *xnames.Cabinet: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: xname.Cabinet}, + }, nil + // Chassis + case xnames.Chassis: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: xname.Cabinet}, + {HardwareType: hardwaretypes.Chassis, Ordinal: xname.Chassis}, + }, nil + case *xnames.Chassis: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: xname.Cabinet}, + {HardwareType: hardwaretypes.Chassis, Ordinal: xname.Chassis}, + }, nil + // Chassis BMC + case xnames.ChassisBMC: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: xname.Cabinet}, + {HardwareType: hardwaretypes.Chassis, Ordinal: xname.Chassis}, + {HardwareType: hardwaretypes.ChassisManagementModule, Ordinal: xname.ChassisBMC}, + }, nil + case *xnames.ChassisBMC: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: xname.Cabinet}, + {HardwareType: hardwaretypes.Chassis, Ordinal: xname.Chassis}, + {HardwareType: hardwaretypes.ChassisManagementModule, Ordinal: xname.ChassisBMC}, + }, nil + // Compute Module + case xnames.ComputeModule: + return inventory.LocationPath{
{HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: xname.Cabinet}, + {HardwareType: hardwaretypes.Chassis, Ordinal: xname.Chassis}, + {HardwareType: hardwaretypes.NodeBlade, Ordinal: xname.ComputeModule}, + }, nil + case *xnames.ComputeModule: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: xname.Cabinet}, + {HardwareType: hardwaretypes.Chassis, Ordinal: xname.Chassis}, + {HardwareType: hardwaretypes.NodeBlade, Ordinal: xname.ComputeModule}, + }, nil + // Node BMC + case xnames.NodeBMC: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: xname.Cabinet}, + {HardwareType: hardwaretypes.Chassis, Ordinal: xname.Chassis}, + {HardwareType: hardwaretypes.NodeBlade, Ordinal: xname.ComputeModule}, + {HardwareType: hardwaretypes.NodeCard, Ordinal: xname.NodeBMC}, + {HardwareType: hardwaretypes.NodeController, Ordinal: 0}, // Assumes one node BMC per node card; for all supported CSM hardware this is true + }, nil + case *xnames.NodeBMC: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: xname.Cabinet}, + {HardwareType: hardwaretypes.Chassis, Ordinal: xname.Chassis}, + {HardwareType: hardwaretypes.NodeBlade, Ordinal: xname.ComputeModule}, + {HardwareType: hardwaretypes.NodeCard, Ordinal: xname.NodeBMC}, + {HardwareType: hardwaretypes.NodeController, Ordinal: 0}, // Assumes one node BMC per node card; for all supported CSM hardware this is true + }, nil + // Node + case xnames.Node: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: xname.Cabinet}, + {HardwareType: hardwaretypes.Chassis, Ordinal: xname.Chassis}, + {HardwareType: hardwaretypes.NodeBlade, Ordinal: xname.ComputeModule}, + {HardwareType: hardwaretypes.NodeCard, Ordinal: xname.NodeBMC}, + {HardwareType: hardwaretypes.Node, Ordinal: xname.Node}, + }, nil + case *xnames.Node: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: xname.Cabinet}, + {HardwareType: hardwaretypes.Chassis, Ordinal: xname.Chassis}, + {HardwareType: hardwaretypes.NodeBlade, Ordinal: xname.ComputeModule}, + {HardwareType: hardwaretypes.NodeCard, Ordinal: xname.NodeBMC}, + {HardwareType: hardwaretypes.Node, Ordinal: xname.Node}, + }, nil + // Router Module + case xnames.RouterModule: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: xname.Cabinet}, + {HardwareType: hardwaretypes.Chassis, Ordinal: xname.Chassis}, + {HardwareType: hardwaretypes.HighSpeedSwitchEnclosure, Ordinal: xname.RouterModule}, + }, nil + case *xnames.RouterModule: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: xname.Cabinet}, + {HardwareType: hardwaretypes.Chassis, Ordinal: xname.Chassis}, + {HardwareType: hardwaretypes.HighSpeedSwitchEnclosure, Ordinal: xname.RouterModule}, + }, nil + // Router BMC + case xnames.RouterBMC: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: xname.Cabinet}, + {HardwareType: hardwaretypes.Chassis, Ordinal: xname.Chassis}, + {HardwareType: 
hardwaretypes.HighSpeedSwitchEnclosure, Ordinal: xname.RouterModule}, + {HardwareType: hardwaretypes.HighSpeedSwitchController, Ordinal: xname.RouterBMC}, + }, nil + case *xnames.RouterBMC: + return inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: xname.Cabinet}, + {HardwareType: hardwaretypes.Chassis, Ordinal: xname.Chassis}, + {HardwareType: hardwaretypes.HighSpeedSwitchEnclosure, Ordinal: xname.RouterModule}, + {HardwareType: hardwaretypes.HighSpeedSwitchController, Ordinal: xname.RouterBMC}, + }, nil + } + + return nil, fmt.Errorf("unable to convert xname type (%s)", xnameRaw.Type()) +} + +func (csm *CSM) identifyDeviceSlug(manufacturer, model string) (string, error) { + for deviceSlug, deviceType := range csm.hardwareLibrary.DeviceTypes { + for _, identification := range deviceType.Identifications { + // log.Info().Msgf("Checking %v against [%s, %s]", identification, manufacturer, model) + if identification.Manufacturer == manufacturer && identification.Model == model { + return deviceSlug, nil + } + } + } + + return "", fmt.Errorf("unable to find corresponding device slug for manufacturer (%s) and model (%s)", manufacturer, model) +} diff --git a/internal/provider/csm/import_test.go b/internal/provider/csm/import_test.go new file mode 100644 index 00000000..633c4c84 --- /dev/null +++ b/internal/provider/csm/import_test.go @@ -0,0 +1,334 @@ +package csm + +import ( + "testing" + + "github.com/Cray-HPE/cani/internal/inventory" + "github.com/Cray-HPE/cani/pkg/hardwaretypes" + "github.com/Cray-HPE/hms-xname/xnames" + "github.com/stretchr/testify/suite" +) + +type FromXnameSuite struct { + suite.Suite +} + +func (suite *FromXnameSuite) TestSystem() { + lp, err := FromXname(xnames.System{}) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestSystemPointer() { + lp, err := FromXname(&xnames.System{}) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestCabinet() { + lp, err := FromXname(xnames.Cabinet{ + Cabinet: 1000, + }) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: 1000}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestCabinetPointer() { + lp, err := FromXname(&xnames.Cabinet{ + Cabinet: 1000, + }) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: 1000}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestChassis() { + lp, err := FromXname(xnames.Chassis{ + Cabinet: 1000, + Chassis: 2, + }) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: 1000}, + {HardwareType: hardwaretypes.Chassis, Ordinal: 2}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestChassisPointer() { + lp, err := FromXname(&xnames.Chassis{ + Cabinet: 1000, + Chassis: 2, + }) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: 1000}, + {HardwareType: 
hardwaretypes.Chassis, Ordinal: 2}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestChassisBMC() { + lp, err := FromXname(xnames.ChassisBMC{ + Cabinet: 1000, + Chassis: 2, + ChassisBMC: 0, + }) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: 1000}, + {HardwareType: hardwaretypes.Chassis, Ordinal: 2}, + {HardwareType: hardwaretypes.ChassisManagementModule, Ordinal: 0}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestChassisBMCPointer() { + lp, err := FromXname(&xnames.ChassisBMC{ + Cabinet: 1000, + Chassis: 2, + ChassisBMC: 0, + }) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: 1000}, + {HardwareType: hardwaretypes.Chassis, Ordinal: 2}, + {HardwareType: hardwaretypes.ChassisManagementModule, Ordinal: 0}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestComputeModule() { + lp, err := FromXname(xnames.ComputeModule{ + Cabinet: 1000, + Chassis: 2, + ComputeModule: 7, + }) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: 1000}, + {HardwareType: hardwaretypes.Chassis, Ordinal: 2}, + {HardwareType: hardwaretypes.NodeBlade, Ordinal: 7}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestComputeModulePointer() { + lp, err := FromXname(&xnames.ComputeModule{ + Cabinet: 1000, + Chassis: 2, + ComputeModule: 7, + }) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: 1000}, + {HardwareType: hardwaretypes.Chassis, Ordinal: 2}, + {HardwareType: hardwaretypes.NodeBlade, Ordinal: 7}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestNodeBMC() { + lp, err := FromXname(xnames.NodeBMC{ + Cabinet: 1000, + Chassis: 2, + ComputeModule: 7, + NodeBMC: 1, + }) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: 1000}, + {HardwareType: hardwaretypes.Chassis, Ordinal: 2}, + {HardwareType: hardwaretypes.NodeBlade, Ordinal: 7}, + {HardwareType: hardwaretypes.NodeCard, Ordinal: 1}, + {HardwareType: hardwaretypes.NodeController, Ordinal: 0}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestNodeBMCPointer() { + lp, err := FromXname(&xnames.NodeBMC{ + Cabinet: 1000, + Chassis: 2, + ComputeModule: 7, + NodeBMC: 1, + }) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: 1000}, + {HardwareType: hardwaretypes.Chassis, Ordinal: 2}, + {HardwareType: hardwaretypes.NodeBlade, Ordinal: 7}, + {HardwareType: hardwaretypes.NodeCard, Ordinal: 1}, + {HardwareType: hardwaretypes.NodeController, Ordinal: 0}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestNode() { + lp, err := FromXname(xnames.Node{ + Cabinet: 1000, + Chassis: 2, + ComputeModule: 7, + NodeBMC: 1, + Node: 3, + }) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: 1000}, + {HardwareType: hardwaretypes.Chassis, 
Ordinal: 2}, + {HardwareType: hardwaretypes.NodeBlade, Ordinal: 7}, + {HardwareType: hardwaretypes.NodeCard, Ordinal: 1}, + {HardwareType: hardwaretypes.Node, Ordinal: 3}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestNodePointer() { + lp, err := FromXname(&xnames.Node{ + Cabinet: 1000, + Chassis: 2, + ComputeModule: 7, + NodeBMC: 1, + Node: 3, + }) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: 1000}, + {HardwareType: hardwaretypes.Chassis, Ordinal: 2}, + {HardwareType: hardwaretypes.NodeBlade, Ordinal: 7}, + {HardwareType: hardwaretypes.NodeCard, Ordinal: 1}, + {HardwareType: hardwaretypes.Node, Ordinal: 3}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestRouterModule() { + lp, err := FromXname(xnames.RouterModule{ + Cabinet: 1000, + Chassis: 2, + RouterModule: 7, + }) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: 1000}, + {HardwareType: hardwaretypes.Chassis, Ordinal: 2}, + {HardwareType: hardwaretypes.HighSpeedSwitchEnclosure, Ordinal: 7}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestRouterModulePointer() { + lp, err := FromXname(&xnames.RouterModule{ + Cabinet: 1000, + Chassis: 2, + RouterModule: 7, + }) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: 1000}, + {HardwareType: hardwaretypes.Chassis, Ordinal: 2}, + {HardwareType: hardwaretypes.HighSpeedSwitchEnclosure, Ordinal: 7}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestRouterBMC() { + lp, err := FromXname(xnames.RouterBMC{ + Cabinet: 1000, + Chassis: 2, + RouterModule: 7, + RouterBMC: 0, + }) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: 1000}, + {HardwareType: hardwaretypes.Chassis, Ordinal: 2}, + {HardwareType: hardwaretypes.HighSpeedSwitchEnclosure, Ordinal: 7}, + {HardwareType: hardwaretypes.HighSpeedSwitchController, Ordinal: 0}, + } + + suite.Equal(expectedLP, lp) +} + +func (suite *FromXnameSuite) TestRouterBMCPointer() { + lp, err := FromXname(&xnames.RouterBMC{ + Cabinet: 1000, + Chassis: 2, + RouterModule: 7, + RouterBMC: 0, + }) + suite.NoError(err) + + expectedLP := inventory.LocationPath{ + {HardwareType: hardwaretypes.System, Ordinal: 0}, + {HardwareType: hardwaretypes.Cabinet, Ordinal: 1000}, + {HardwareType: hardwaretypes.Chassis, Ordinal: 2}, + {HardwareType: hardwaretypes.HighSpeedSwitchEnclosure, Ordinal: 7}, + {HardwareType: hardwaretypes.HighSpeedSwitchController, Ordinal: 0}, + } + + suite.Equal(expectedLP, lp) +} + +func TestFromXnameSuite(t *testing.T) { + suite.Run(t, new(FromXnameSuite)) +} diff --git a/internal/provider/csm/metadata.go b/internal/provider/csm/metadata.go index 4acd2ddd..f1d4046f 100644 --- a/internal/provider/csm/metadata.go +++ b/internal/provider/csm/metadata.go @@ -13,7 +13,7 @@ type NodeMetadata struct { Role *string SubRole *string Nid *int - Alias *string + Alias []string AdditionalProperties map[string]interface{} } @@ -39,6 +39,8 @@ func GetProviderMetadata(cHardware inventory.Hardware) (result interface{}, err switch cHardware.Type { case hardwaretypes.Node: result = NodeMetadata{} + case 
hardwaretypes.Cabinet: + result = CabinetMetadata{} default: // This can happen if new metadata structs are added but not handled in this switch case return nil, fmt.Errorf("hardware object (%s) has unexpected provider metadata", cHardware.ID) @@ -75,3 +77,81 @@ func GetProviderMetadataT[T any](cHardware inventory.Hardware) (*T, error) { } return &metadata, nil } + +func (csm *CSM) BuildHardwareMetadata(cHardware *inventory.Hardware, rawProperties map[string]interface{}) error { + if cHardware.ProviderProperties == nil { + cHardware.ProviderProperties = map[string]interface{}{} + } + + switch cHardware.Type { + case hardwaretypes.Cabinet: + properties := CabinetMetadata{} + if _, exists := cHardware.ProviderProperties["csm"]; exists { + // If one exists, set it. + if err := mapstructure.Decode(cHardware.ProviderProperties["csm"], &properties); err != nil { + return err + } + } + + // Make changes to the cabinet metadata + // The keys of rawProperties need to match what is defined in ./cmd/cabinet/add_cabinet.go + if vlanIDRaw, exists := rawProperties["vlanID"]; exists { + if vlanIDRaw == nil { + properties.HMNVlan = nil + } else { + properties.HMNVlan = IntPtr(vlanIDRaw.(int)) + } + } + + cHardware.ProviderProperties["csm"] = properties + + return nil + case hardwaretypes.Node: + // TODO do something interesting with the raw data: convert and validate it + properties := NodeMetadata{} // Create an empty one + if _, exists := cHardware.ProviderProperties["csm"]; exists { + // If one exists, set it. + if err := mapstructure.Decode(cHardware.ProviderProperties["csm"], &properties); err != nil { + return err + } + } + // Make changes to the node metadata + // The keys of rawProperties need to match what is defined in ./cmd/node/update_node.go + if roleRaw, exists := rawProperties["role"]; exists { + if roleRaw == nil { + properties.Role = nil + } else { + properties.Role = StringPtr(roleRaw.(string)) + } + } + if subroleRaw, exists := rawProperties["subrole"]; exists { + if subroleRaw == nil { + properties.SubRole = nil + } else { + properties.SubRole = StringPtr(subroleRaw.(string)) + } + } + if nidRaw, exists := rawProperties["nid"]; exists { + if nidRaw == nil { + properties.Nid = nil + } else { + properties.Nid = IntPtr(nidRaw.(int)) + } + } + if aliasRaw, exists := rawProperties["alias"]; exists { + if aliasRaw == nil { + properties.Alias = nil + } else { + properties.Alias = []string{aliasRaw.(string)} + } + } + + cHardware.ProviderProperties["csm"] = properties + + return nil + default: + // This hardware type doesn't have metadata for it right now + return nil + } + +} diff --git a/internal/provider/csm/reconcile.go b/internal/provider/csm/reconcile.go index 4ef690e8..a06b7a31 100644 --- a/internal/provider/csm/reconcile.go +++ b/internal/provider/csm/reconcile.go @@ -9,7 +9,6 @@ import ( "github.com/Cray-HPE/cani/internal/inventory" "github.com/Cray-HPE/cani/internal/provider/csm/sls" - "github.com/Cray-HPE/cani/internal/provider/csm/validate" sls_client "github.com/Cray-HPE/cani/pkg/sls-client" "github.com/Cray-HPE/hms-xname/xnametypes" "github.com/rs/zerolog/log" @@ -79,7 +78,7 @@ func (csm *CSM) Reconcile(ctx context.Context, datastore inventory.Datastore) (e // Identify hardware present in both states // Does not take into account differences in Class/ExtraProperties, just by the primary key of xname - identicalHardware, hardwareWithDifferingValues, err := sls.HardwareUnion(currentSLSState, expectedSLSState) + identicalHardware, hardwareWithDifferingValues, err := 
sls.HardwareUnion(expectedSLSState, currentSLSState) if err != nil { return err } @@ -94,7 +93,7 @@ func (csm *CSM) Reconcile(ctx context.Context, datastore inventory.Datastore) (e // unexpectedHardwareRemoval := []sls_client.Hardware{} for _, hardware := range hardwareRemoved { - if hardwareMapping[hardware.Xname].Status != inventory.HardwareStatusDecomissioned { + if hardwareMapping[hardware.Xname].Status != inventory.HardwareStatusDecommissioned { // This piece of hardware wasn't flagged for removal from the system, but // the reconcile logic wants to remove it, and this is bad unexpectedHardwareRemoval = append(unexpectedHardwareRemoval, hardware) @@ -139,14 +138,15 @@ func (csm *CSM) Reconcile(ctx context.Context, datastore inventory.Datastore) (e modifiedState.Hardware[hardware.Xname] = hardware } for _, hardwarePair := range hardwareWithDifferingValues { - updatedHardware := hardwarePair.HardwareB + updatedHardware := hardwarePair.HardwareA modifiedState.Hardware[updatedHardware.Xname] = updatedHardware } - _, err = validate.Validate(&modifiedState) - if err != nil { - return fmt.Errorf("Validation failed. %v\n", err) - } + // TODO something is broken with validation of the modified state; re-enable this once fixed + // _, err = validate.Validate(&modifiedState) + // if err != nil { + // return fmt.Errorf("Validation failed. %v\n", err) + // } // // Modify the System's SLS instance @@ -319,6 +319,8 @@ func displayUnwantedChanges(unwantedHardwareRemoved, unwantedHardwareAdded []sls } func buildHardwareString(hardware sls_client.Hardware) (string, error) { + // TODO include CANU UUID + extraPropertiesRaw, err := hardware.DecodeExtraProperties() if err != nil { return "", err } @@ -328,10 +330,15 @@ func buildHardwareString(hardware sls_client.Hardware) (string, error) { tokens = append(tokens, fmt.Sprintf("Type: %s", hardware.TypeString)) switch hardware.TypeString { - case xnametypes.Cabinet: - // Nothing to do + // case xnametypes.Cabinet: + // // If we don't know how to pretty print it, let's just do the raw JSON + // extraPropertiesRaw, err := json.Marshal(hardware.ExtraProperties) + // if err != nil { + // return "", err + // } + // tokens = append(tokens, string(extraPropertiesRaw)) case xnametypes.Chassis: - // Nothing to do + // Nothing to do case xnametypes.ChassisBMC: // Nothing to do case xnametypes.CabinetPDUController: diff --git a/internal/provider/csm/sls/diff_sls.go b/internal/provider/csm/sls/diff_sls.go index 4b6b2f03..45078051 100644 --- a/internal/provider/csm/sls/diff_sls.go +++ b/internal/provider/csm/sls/diff_sls.go @@ -142,24 +142,131 @@ func HardwareUnion(a, b sls_client.SlsState) (identicalHardware []sls_client.Har } func stripIpInformationFromHardware(extraPropertiesRaw interface{}) interface{} { + // Helper command to build up the switch + // grep -R "type HardwareExtraProperties" -R ./pkg/sls-client/ | grep -v HardwareExtraPropertiesCabinetNetworks | awk '{print $2 ":" }' | sort | sed -e 's/^/case sls_client./' switch ep := extraPropertiesRaw.(type) { + case sls_client.HardwareExtraPropertiesBmcNic: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep + case sls_client.HardwareExtraPropertiesCabPduNic: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep + case sls_client.HardwareExtraPropertiesCabPduPwrConnector: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep case sls_client.HardwareExtraPropertiesCabinet: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + ep.Networks = nil + ep.Model 
= "" // TODO deal with this at somepoint // if cabinetKind := csi.CabinetKind(ep.Model); cabinetKind.IsModel() { // ep.Model = "" // } return ep + case sls_client.HardwareExtraPropertiesCduMgmtSwitch: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep + case sls_client.HardwareExtraPropertiesChassis: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep + case sls_client.HardwareExtraPropertiesChassisBmc: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep + case sls_client.HardwareExtraPropertiesCompmod: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep + case sls_client.HardwareExtraPropertiesCompmodPowerConnector: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep + case sls_client.HardwareExtraPropertiesHsnConnector: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep case sls_client.HardwareExtraPropertiesMgmtHlSwitch: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + ep.IP4addr = "" ep.IP6addr = "" ep.Model = "" // Not guaranteed that the system was installed with information about the switch model. return ep case sls_client.HardwareExtraPropertiesMgmtSwitch: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + ep.IP4addr = "" ep.IP6addr = "" ep.Model = "" // Not guaranteed that the system was installed with information about the switch model. return ep + case sls_client.HardwareExtraPropertiesMgmtSwitchConnector: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + + ep.CaniLastModified = "" + return ep + case sls_client.HardwareExtraPropertiesNcard: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep + case sls_client.HardwareExtraPropertiesNode: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep + case sls_client.HardwareExtraPropertiesNodeHsnNic: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep + case sls_client.HardwareExtraPropertiesNodeNic: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep + case sls_client.HardwareExtraPropertiesRtrBmc: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep + case sls_client.HardwareExtraPropertiesRtrBmcNic: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep + case sls_client.HardwareExtraPropertiesRtrmod: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep + case sls_client.HardwareExtraPropertiesSystem: + ep.CaniId = "" + ep.CaniSlsSchemaVersion = "" + ep.CaniLastModified = "" + return ep } return extraPropertiesRaw diff --git a/internal/provider/csm/sls/hardware.go b/internal/provider/csm/sls/hardware.go index b3c59ae5..f44ad76e 100644 --- a/internal/provider/csm/sls/hardware.go +++ b/internal/provider/csm/sls/hardware.go @@ -23,12 +23,17 @@ package sls import ( + "context" + "fmt" "sort" + "sync" sls_client "github.com/Cray-HPE/cani/pkg/sls-client" sls_common "github.com/Cray-HPE/hms-sls/v2/pkg/sls-common" "github.com/Cray-HPE/hms-xname/xnames" + "github.com/Cray-HPE/hms-xname/xnametypes" "github.com/antihax/optional" + "github.com/rs/zerolog/log" ) func NewHardware(xname xnames.Xname, class sls_client.HardwareClass, extraProperties interface{}) sls_client.Hardware { @@ -38,7 +43,7 @@ func NewHardware(xname 
xnames.Xname, class sls_client.HardwareClass, extraProper ExtraProperties: extraProperties, // Calculate derived fields - Parent: xname.ParentInterface().String(), + Parent: xnametypes.GetHMSCompParent(xname.String()), TypeString: xname.Type(), Type: sls_client.HardwareType(sls_common.HMSTypeToHMSStringType(xname.Type())), // The main lookup table is in the SLS package, TODO should maybe move that into this package } @@ -92,3 +97,78 @@ func FilterHardware(allHardware map[string]sls_client.Hardware, filter func(sls_ return result, nil } + +func FilterHardwareByType(allHardware map[string]sls_client.Hardware, types ...xnametypes.HMSType) (map[string]sls_client.Hardware, error) { + return FilterHardware(allHardware, func(hardware sls_client.Hardware) (bool, error) { + for _, hmsType := range types { + if hardware.TypeString == hmsType { + return true, nil + } + } + return false, nil + }) +} + +func DecodeExtraProperties[T any](hardware sls_client.Hardware) (*T, error) { + epRaw, err := hardware.DecodeExtraProperties() + if err != nil { + return nil, err + } + + if epRaw == nil { + return nil, nil + } + + ep, ok := epRaw.(T) + if !ok { + var expectedType T + return nil, fmt.Errorf("unexpected provider metadata type (%T) expected (%T)", epRaw, expectedType) + } + return &ep, nil +} + +func HardwareUpdate(slsClient *sls_client.APIClient, ctx context.Context, hardwareToUpdate map[string]sls_client.Hardware, workers int) error { + var wg sync.WaitGroup + queue := make(chan sls_client.Hardware, 10) + // TODO need to collect errors + // errors := + updateWorker := func(id int) { + defer wg.Done() + + log.Trace().Int("worker", id).Msgf("SLS HardwareUpdate: Starting worker") + for hardware := range queue { + log.Trace().Int("worker", id).Msgf("SLS HardwareUpdate: Updating SLS hardware: %s", hardware.Xname) + // Perform a PUT against SLS + _, r, err := slsClient.HardwareApi.HardwareXnamePut(ctx, hardware.Xname, NewHardwareXnamePutOpts(hardware)) + if err != nil { + // TODO need to collect errors + // return errors.Join( + // fmt.Errorf("failed to update hardware (%s) in SLS", hardware.Xname), + // err, + // ) + log.Error().Err(err).Msg("failed to update SLS") + continue + } + log.Trace().Int("status", r.StatusCode).Msg("SLS HardwareUpdate: Updated hardware to SLS") + } + log.Trace().Int("worker", id).Msgf("SLS HardwareUpdate: Stopping worker") + + } + + for i := 0; i < workers; i++ { + wg.Add(1) + go updateWorker(i) + } + + for _, hardware := range hardwareToUpdate { + log.Trace().Msgf("SLS HardwareUpdate: Adding %s to queue", hardware.Xname) + queue <- hardware + } + close(queue) + log.Trace().Msgf("SLS HardwareUpdate: Queue is closed") + + log.Trace().Msgf("SLS HardwareUpdate: Waiting for workers to complete") + wg.Wait() + + return nil +} diff --git a/internal/provider/csm/sls_state_generator.go b/internal/provider/csm/sls_state_generator.go index fbd3f863..ce3d9a07 100644 --- a/internal/provider/csm/sls_state_generator.go +++ b/internal/provider/csm/sls_state_generator.go @@ -3,6 +3,7 @@ package csm import ( "errors" "fmt" + "time" "github.com/Cray-HPE/cani/internal/inventory" "github.com/Cray-HPE/cani/internal/provider/csm/sls" @@ -180,14 +181,42 @@ func BuildSLSHardware(cHardware inventory.Hardware, locationPath inventory.Locat switch cHardware.Type { case hardwaretypes.Cabinet: - return sls_client.Hardware{}, nil + var cabinetExtraProperties sls_client.HardwareExtraPropertiesCabinet + + // Apply CANI Metadata + cabinetExtraProperties.CaniId = cHardware.ID.String() + 
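// NOTE (editor's aside): the SLS-backed cases in this switch all stamp these same + // three CANI fields. stripIpInformationFromHardware in sls/diff_sls.go zeroes + // CaniId, CaniSlsSchemaVersion, and CaniLastModified on both sides, presumably so + // these bookkeeping stamps alone never make hardware look changed when reconciling. +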
cabinetExtraProperties.CaniSlsSchemaVersion = "v1alpha1" + cabinetExtraProperties.CaniLastModified = time.Now().UTC().String() + + // TODO need cabinet metadata + + extraProperties = cabinetExtraProperties case hardwaretypes.Chassis: - return sls_client.Hardware{}, nil + var chassisExtraProperties sls_client.HardwareExtraPropertiesChassis + + // Apply CANI Metadata + chassisExtraProperties.CaniId = cHardware.ID.String() + chassisExtraProperties.CaniSlsSchemaVersion = "v1alpha1" + chassisExtraProperties.CaniLastModified = time.Now().UTC().String() + + extraProperties = chassisExtraProperties + case hardwaretypes.ChassisManagementModule: + var cmmExtraProperties sls_client.HardwareExtraPropertiesChassisBmc + + // Apply CANI Metadata + cmmExtraProperties.CaniId = cHardware.ID.String() + cmmExtraProperties.CaniSlsSchemaVersion = "v1alpha1" + cmmExtraProperties.CaniLastModified = time.Now().UTC().String() + + extraProperties = cmmExtraProperties case hardwaretypes.NodeBlade: + // Not represented in SLS return sls_client.Hardware{}, nil case hardwaretypes.NodeCard: + // Not represented in SLS return sls_client.Hardware{}, nil case hardwaretypes.NodeController: + // Not represented in SLS return sls_client.Hardware{}, nil case hardwaretypes.Node: metadata, err := GetProviderMetadataT[NodeMetadata](cHardware) @@ -198,8 +227,14 @@ func BuildSLSHardware(cHardware inventory.Hardware, locationPath inventory.Locat ) } + var nodeExtraProperties sls_client.HardwareExtraPropertiesNode + // Apply CANI Metadata + nodeExtraProperties.CaniId = cHardware.ID.String() + nodeExtraProperties.CaniSlsSchemaVersion = "v1alpha1" + nodeExtraProperties.CaniLastModified = time.Now().UTC().String() + + // Logical metadata if metadata != nil { - var nodeExtraProperties sls_client.HardwareExtraPropertiesNode // In order to properly populate SLS several bits of information are required. // This information should have been collected when hardware was added to the inventory @@ -217,13 +252,15 @@ func BuildSLSHardware(cHardware inventory.Hardware, locationPath inventory.Locat nodeExtraProperties.NID = int32(*metadata.Nid) } if metadata.Alias != nil { - nodeExtraProperties.Aliases = []string{*metadata.Alias} // TODO NEED TO HANDLE hardware types with multiple ALIASES + nodeExtraProperties.Aliases = metadata.Alias } - extraProperties = nodeExtraProperties log.Info().Any("nodeEp", nodeExtraProperties).Msgf("Generated Extra Properties for %s", xname.String()) } - + extraProperties = nodeExtraProperties + default: + log.Warn().Msgf("Do not know how to handle %s", xname.String()) + return sls_client.Hardware{}, nil } return sls.NewHardware(xname, class, extraProperties), nil diff --git a/internal/provider/csm/types.go b/internal/provider/csm/types.go index 6cf41714..14cc2c0e 100644 --- a/internal/provider/csm/types.go +++ b/internal/provider/csm/types.go @@ -79,99 +79,114 @@ func (xc *XnameConverter) Match(cHardware inventory.Hardware, locationPath inven return true, nil } +// TODO The schema of this structure should probably be revamped to explain what it is doing +// in a more concise/clearer way. It's not really mapping xname ordinals to location path. 
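+// +// An editor's sketch of one way to read the mapping (inferred from BuildXname in +// types_generated.go, so treat it as a hypothesis rather than a spec): each XnameOrdinal +// pairs a hardware type that must appear in the location path with the index into that +// path whose Ordinal supplies the xname field for that level, while -1 marks levels that +// must be present in the path but contribute no ordinal. For example, with the +// xnametypes.Chassis entry below, a location path of [System:0, Cabinet:1000, Chassis:2] +// pulls ordinals from indices 1 and 2 and yields xnames.Chassis{Cabinet: 1000, Chassis: 2}, +// i.e. x1000c2.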
var enhancedTypeConverters = map[xnametypes.HMSType]XnameConverter{ xnametypes.Cabinet: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.Cabinet, 0}, + {hardwaretypes.System, -1}, + {hardwaretypes.Cabinet, 1}, }, }, xnametypes.CEC: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.Cabinet, 0}, - {hardwaretypes.CabinetEnvironmentalController, 1}, + {hardwaretypes.System, -1}, + {hardwaretypes.Cabinet, 1}, + {hardwaretypes.CabinetEnvironmentalController, 2}, }, }, xnametypes.CabinetPDUController: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.Cabinet, 0}, - {hardwaretypes.CabinetPDUController, 1}, + {hardwaretypes.System, -1}, + {hardwaretypes.Cabinet, 1}, + {hardwaretypes.CabinetPDUController, 2}, }, }, xnametypes.CabinetPDU: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.Cabinet, 0}, - {hardwaretypes.CabinetPDUController, 1}, - {hardwaretypes.CabinetPDU, 2}, + {hardwaretypes.System, -1}, + {hardwaretypes.Cabinet, 1}, + {hardwaretypes.CabinetPDUController, 2}, + {hardwaretypes.CabinetPDU, 3}, }, }, xnametypes.Chassis: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.Cabinet, 0}, - {hardwaretypes.Chassis, 1}, + {hardwaretypes.System, -1}, + {hardwaretypes.Cabinet, 1}, + {hardwaretypes.Chassis, 2}, }, }, xnametypes.ChassisBMC: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.Cabinet, 0}, - {hardwaretypes.Chassis, 1}, - {hardwaretypes.ChassisManagementModule, 2}, + {hardwaretypes.System, -1}, + {hardwaretypes.Cabinet, 1}, + {hardwaretypes.Chassis, 2}, + {hardwaretypes.ChassisManagementModule, 3}, }, }, xnametypes.ComputeModule: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.Cabinet, 0}, - {hardwaretypes.Chassis, 1}, - {hardwaretypes.NodeBlade, 2}, + {hardwaretypes.System, -1}, + {hardwaretypes.Cabinet, 1}, + {hardwaretypes.Chassis, 2}, + {hardwaretypes.NodeBlade, 3}, }, }, xnametypes.NodeEnclosure: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.Cabinet, 0}, - {hardwaretypes.Chassis, 1}, - {hardwaretypes.NodeBlade, 2}, - {hardwaretypes.NodeCard, 3}, + {hardwaretypes.System, -1}, + {hardwaretypes.Cabinet, 1}, + {hardwaretypes.Chassis, 2}, + {hardwaretypes.NodeBlade, 3}, + {hardwaretypes.NodeCard, 4}, }, }, xnametypes.NodeBMC: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.Cabinet, 0}, - {hardwaretypes.Chassis, 1}, - {hardwaretypes.NodeBlade, 2}, - {hardwaretypes.NodeCard, 3}, + {hardwaretypes.System, -1}, + {hardwaretypes.Cabinet, 1}, + {hardwaretypes.Chassis, 2}, + {hardwaretypes.NodeBlade, 3}, + {hardwaretypes.NodeCard, 4}, {hardwaretypes.NodeController, -1}, }, }, xnametypes.Node: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.Cabinet, 0}, - {hardwaretypes.Chassis, 1}, - {hardwaretypes.NodeBlade, 2}, - {hardwaretypes.NodeCard, 3}, - {hardwaretypes.Node, 4}, + {hardwaretypes.System, -1}, + {hardwaretypes.Cabinet, 1}, + {hardwaretypes.Chassis, 2}, + {hardwaretypes.NodeBlade, 3}, + {hardwaretypes.NodeCard, 4}, + {hardwaretypes.Node, 5}, }, }, xnametypes.RouterModule: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.Cabinet, 0}, - {hardwaretypes.Chassis, 1}, - {hardwaretypes.HighSpeedSwitchEnclosure, 2}, + {hardwaretypes.System, -1}, + {hardwaretypes.Cabinet, 1}, + {hardwaretypes.Chassis, 2}, + {hardwaretypes.HighSpeedSwitchEnclosure, 3}, }, }, xnametypes.RouterBMC: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.Cabinet, 0}, - {hardwaretypes.Chassis, 1}, - {hardwaretypes.HighSpeedSwitchEnclosure, 2}, + {hardwaretypes.System, -1}, + {hardwaretypes.Cabinet, 1}, + {hardwaretypes.Chassis, 2}, + 
{hardwaretypes.HighSpeedSwitchEnclosure, 3}, {hardwaretypes.HighSpeedSwitch, -1}, - {hardwaretypes.HighSpeedSwitchController, 3}, + {hardwaretypes.HighSpeedSwitchController, 4}, }, }, xnametypes.MgmtSwitch: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.Cabinet, 0}, - {hardwaretypes.Chassis, 1}, - {hardwaretypes.ManagementSwitchEnclosure, 2}, + {hardwaretypes.System, -1}, + {hardwaretypes.Cabinet, 1}, + {hardwaretypes.Chassis, 2}, + {hardwaretypes.ManagementSwitchEnclosure, 3}, {hardwaretypes.ManagementSwitch, -1}, }, PropertyMatcher: func(cHardware inventory.Hardware) (bool, error) { @@ -188,9 +203,10 @@ var enhancedTypeConverters = map[xnametypes.HMSType]XnameConverter{ xnametypes.MgmtHLSwitchEnclosure: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.Cabinet, 0}, - {hardwaretypes.Chassis, 1}, - {hardwaretypes.ManagementSwitchEnclosure, 2}, + {hardwaretypes.System, -1}, + {hardwaretypes.Cabinet, 1}, + {hardwaretypes.Chassis, 2}, + {hardwaretypes.ManagementSwitchEnclosure, 3}, }, PropertyMatcher: func(cHardware inventory.Hardware) (bool, error) { // Decode the properties into a struct @@ -205,10 +221,11 @@ var enhancedTypeConverters = map[xnametypes.HMSType]XnameConverter{ }, xnametypes.MgmtHLSwitch: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.Cabinet, 0}, - {hardwaretypes.Chassis, 1}, - {hardwaretypes.ManagementSwitchEnclosure, 2}, - {hardwaretypes.ManagementSwitch, 3}, + {hardwaretypes.System, -1}, + {hardwaretypes.Cabinet, 1}, + {hardwaretypes.Chassis, 2}, + {hardwaretypes.ManagementSwitchEnclosure, 3}, + {hardwaretypes.ManagementSwitch, 4}, }, PropertyMatcher: func(cHardware inventory.Hardware) (bool, error) { // Decode the properties into a struct @@ -224,13 +241,15 @@ var enhancedTypeConverters = map[xnametypes.HMSType]XnameConverter{ xnametypes.CDU: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.CoolingDistributionUnit, 0}, + {hardwaretypes.System, -1}, + {hardwaretypes.CoolingDistributionUnit, 1}, }, }, xnametypes.CDUMgmtSwitch: { XnameOrdinalMapping: []XnameOrdinal{ - {hardwaretypes.CoolingDistributionUnit, 0}, - {hardwaretypes.ManagementSwitchEnclosure, 1}, + {hardwaretypes.System, -1}, + {hardwaretypes.CoolingDistributionUnit, 1}, + {hardwaretypes.ManagementSwitchEnclosure, 2}, {hardwaretypes.ManagementSwitch, -1}, }, }, diff --git a/internal/provider/csm/types_generated.go b/internal/provider/csm/types_generated.go index 0174ec15..2d5c5460 100644 --- a/internal/provider/csm/types_generated.go +++ b/internal/provider/csm/types_generated.go @@ -43,103 +43,103 @@ func BuildXname(cHardware inventory.Hardware, locationPath inventory.LocationPat return nil, nil case xnametypes.CDU: return xnames.CDU{ - CDU: locationPath[0].Ordinal, + CDU: locationPath[1].Ordinal, }, nil case xnametypes.CDUMgmtSwitch: return xnames.CDUMgmtSwitch{ - CDU: locationPath[0].Ordinal, - CDUMgmtSwitch: locationPath[1].Ordinal, + CDU: locationPath[1].Ordinal, + CDUMgmtSwitch: locationPath[2].Ordinal, }, nil case xnametypes.Cabinet: return xnames.Cabinet{ - Cabinet: locationPath[0].Ordinal, + Cabinet: locationPath[1].Ordinal, }, nil case xnametypes.CEC: return xnames.CEC{ - Cabinet: locationPath[0].Ordinal, - CEC: locationPath[1].Ordinal, + Cabinet: locationPath[1].Ordinal, + CEC: locationPath[2].Ordinal, }, nil case xnametypes.CabinetPDUController: return xnames.CabinetPDUController{ - Cabinet: locationPath[0].Ordinal, - CabinetPDUController: locationPath[1].Ordinal, + Cabinet: locationPath[1].Ordinal, + CabinetPDUController: locationPath[2].Ordinal, }, nil case 
xnametypes.CabinetPDU: return xnames.CabinetPDU{ - Cabinet: locationPath[0].Ordinal, - CabinetPDUController: locationPath[1].Ordinal, - CabinetPDU: locationPath[2].Ordinal, + Cabinet: locationPath[1].Ordinal, + CabinetPDUController: locationPath[2].Ordinal, + CabinetPDU: locationPath[3].Ordinal, }, nil case xnametypes.Chassis: return xnames.Chassis{ - Cabinet: locationPath[0].Ordinal, - Chassis: locationPath[1].Ordinal, + Cabinet: locationPath[1].Ordinal, + Chassis: locationPath[2].Ordinal, }, nil case xnametypes.ChassisBMC: return xnames.ChassisBMC{ - Cabinet: locationPath[0].Ordinal, - Chassis: locationPath[1].Ordinal, - ChassisBMC: locationPath[2].Ordinal, + Cabinet: locationPath[1].Ordinal, + Chassis: locationPath[2].Ordinal, + ChassisBMC: locationPath[3].Ordinal, }, nil case xnametypes.ComputeModule: return xnames.ComputeModule{ - Cabinet: locationPath[0].Ordinal, - Chassis: locationPath[1].Ordinal, - ComputeModule: locationPath[2].Ordinal, + Cabinet: locationPath[1].Ordinal, + Chassis: locationPath[2].Ordinal, + ComputeModule: locationPath[3].Ordinal, }, nil case xnametypes.NodeBMC: return xnames.NodeBMC{ - Cabinet: locationPath[0].Ordinal, - Chassis: locationPath[1].Ordinal, - ComputeModule: locationPath[2].Ordinal, - NodeBMC: locationPath[3].Ordinal, + Cabinet: locationPath[1].Ordinal, + Chassis: locationPath[2].Ordinal, + ComputeModule: locationPath[3].Ordinal, + NodeBMC: locationPath[4].Ordinal, }, nil case xnametypes.Node: return xnames.Node{ - Cabinet: locationPath[0].Ordinal, - Chassis: locationPath[1].Ordinal, - ComputeModule: locationPath[2].Ordinal, - NodeBMC: locationPath[3].Ordinal, - Node: locationPath[4].Ordinal, + Cabinet: locationPath[1].Ordinal, + Chassis: locationPath[2].Ordinal, + ComputeModule: locationPath[3].Ordinal, + NodeBMC: locationPath[4].Ordinal, + Node: locationPath[5].Ordinal, }, nil case xnametypes.NodeEnclosure: return xnames.NodeEnclosure{ - Cabinet: locationPath[0].Ordinal, - Chassis: locationPath[1].Ordinal, - ComputeModule: locationPath[2].Ordinal, - NodeEnclosure: locationPath[3].Ordinal, + Cabinet: locationPath[1].Ordinal, + Chassis: locationPath[2].Ordinal, + ComputeModule: locationPath[3].Ordinal, + NodeEnclosure: locationPath[4].Ordinal, }, nil case xnametypes.MgmtHLSwitchEnclosure: return xnames.MgmtHLSwitchEnclosure{ - Cabinet: locationPath[0].Ordinal, - Chassis: locationPath[1].Ordinal, - MgmtHLSwitchEnclosure: locationPath[2].Ordinal, + Cabinet: locationPath[1].Ordinal, + Chassis: locationPath[2].Ordinal, + MgmtHLSwitchEnclosure: locationPath[3].Ordinal, }, nil case xnametypes.MgmtHLSwitch: return xnames.MgmtHLSwitch{ - Cabinet: locationPath[0].Ordinal, - Chassis: locationPath[1].Ordinal, - MgmtHLSwitchEnclosure: locationPath[2].Ordinal, - MgmtHLSwitch: locationPath[3].Ordinal, + Cabinet: locationPath[1].Ordinal, + Chassis: locationPath[2].Ordinal, + MgmtHLSwitchEnclosure: locationPath[3].Ordinal, + MgmtHLSwitch: locationPath[4].Ordinal, }, nil case xnametypes.MgmtSwitch: return xnames.MgmtSwitch{ - Cabinet: locationPath[0].Ordinal, - Chassis: locationPath[1].Ordinal, - MgmtSwitch: locationPath[2].Ordinal, + Cabinet: locationPath[1].Ordinal, + Chassis: locationPath[2].Ordinal, + MgmtSwitch: locationPath[3].Ordinal, }, nil case xnametypes.RouterModule: return xnames.RouterModule{ - Cabinet: locationPath[0].Ordinal, - Chassis: locationPath[1].Ordinal, - RouterModule: locationPath[2].Ordinal, + Cabinet: locationPath[1].Ordinal, + Chassis: locationPath[2].Ordinal, + RouterModule: locationPath[3].Ordinal, }, nil case xnametypes.RouterBMC: 
return xnames.RouterBMC{ - Cabinet: locationPath[0].Ordinal, - Chassis: locationPath[1].Ordinal, - RouterModule: locationPath[2].Ordinal, - RouterBMC: locationPath[3].Ordinal, + Cabinet: locationPath[1].Ordinal, + Chassis: locationPath[2].Ordinal, + RouterModule: locationPath[3].Ordinal, + RouterBMC: locationPath[4].Ordinal, }, nil } return nil, fmt.Errorf("unknown xnametype '%s'", hsmType.String()) diff --git a/internal/provider/csm/validate/checks/hardware_cabinet_check.go b/internal/provider/csm/validate/checks/hardware_cabinet_check.go index eeb00018..f6463d5f 100644 --- a/internal/provider/csm/validate/checks/hardware_cabinet_check.go +++ b/internal/provider/csm/validate/checks/hardware_cabinet_check.go @@ -27,6 +27,7 @@ package checks import ( "fmt" "regexp" + "strings" "github.com/Cray-HPE/cani/internal/provider/csm/validate/common" sls_client "github.com/Cray-HPE/cani/pkg/sls-client" @@ -54,6 +55,11 @@ func (c *HardwareCabinetCheck) Validate(results *common.ValidationResults) { if xnametypes.Cabinet == h.TypeString { continue } + if !strings.HasPrefix(h.Xname, "x") { + // Only check hardware that is present within a cabinet (xname starts with x), + // and not, for example, a CDU (starts with d). + continue + } componentId := fmt.Sprintf("/Hardware/%s", h.Xname) matches := pattern.FindAllString(h.Xname, 1) diff --git a/internal/provider/csm/validate/validate.go b/internal/provider/csm/validate/validate.go index 5cc91e7f..5a3f279c 100644 --- a/internal/provider/csm/validate/validate.go +++ b/internal/provider/csm/validate/validate.go @@ -135,7 +135,7 @@ func ValidateString(slsStateBytes []byte) ([]common.ValidationResult, error) { } func Validate(slsState *sls_client.SlsState) ([]common.ValidationResult, error) { - // If we don't get a raw SLS payload, such as validating an SLS state build inside this tool we need to create the JSON version of the paylpoad + // If we don't get a raw SLS payload, such as when validating an SLS state built inside this tool, we need to create the JSON version of the payload rawSLSState, err := json.Marshal(*slsState) if err != nil { return nil, err } diff --git a/internal/provider/csm/validation.go b/internal/provider/csm/validation.go index 8bbee802..99060819 100644 --- a/internal/provider/csm/validation.go +++ b/internal/provider/csm/validation.go @@ -165,17 +165,19 @@ func (csm *CSM) validateInternalNode(allHardware map[uuid.UUID]inventory.Hardwar // Verify Alias is valid if metadata.Alias != nil { - nodeAliasLookup[*metadata.Alias] = append(nodeAliasLookup[*metadata.Alias], cHardware.ID) + for _, alias := range metadata.Alias { + nodeAliasLookup[alias] = append(nodeAliasLookup[alias], cHardware.ID) - if metadata.Alias != nil && len(*metadata.Alias) == 0 { - validationResult.Errors = append(validationResult.Errors, "Specified Alias is empty") - } + if len(alias) == 0 { + validationResult.Errors = append(validationResult.Errors, "Specified Alias is empty") + } - // TODO a regex here might be better - if strings.Contains(*metadata.Alias, " ") { - validationResult.Errors = append(validationResult.Errors, - fmt.Sprintf("Specified alias (%d) is invalid, alias contains spaces", *metadata.Nid), - ) + // TODO a regex here might be better + if strings.Contains(alias, " ") { + validationResult.Errors = append(validationResult.Errors, + fmt.Sprintf("Specified alias (%s) is invalid, alias contains spaces", alias), + ) + } } } @@ -268,9 +270,9 @@ func (csm *CSM) validateInternalCabinet(allHardware map[uuid.UUID]inventory.Hard if 
metadata.HMNVlan != nil { // Verify the vlan is within the allowed range - if 0 <= *metadata.HMNVlan && *metadata.HMNVlan <= 4095 { + if !(0 <= *metadata.HMNVlan && *metadata.HMNVlan <= 4094) { validationResult.Errors = append(validationResult.Errors, - fmt.Sprintf("Specified HMN Vlan (%d) is invalid, must be in range: 0-4095", *metadata.HMNVlan), + fmt.Sprintf("Specified HMN Vlan (%d) is invalid, must be in range: 0-4094", *metadata.HMNVlan), ) } diff --git a/pkg/hardwaretypes/hardware-types/hpe-cabinet-eia-common.yaml b/pkg/hardwaretypes/hardware-types/hpe-cabinet-eia-common.yaml index 0b8588fa..f5bcbaa4 100644 --- a/pkg/hardwaretypes/hardware-types/hpe-cabinet-eia-common.yaml +++ b/pkg/hardwaretypes/hardware-types/hpe-cabinet-eia-common.yaml @@ -1,5 +1,17 @@ --- manufacturer: HPE +model: EX2000 +hardware-type: Cabinet +slug: hpe-eia-cabinet + +device-bays: + - name: Chassis 0 + allowed: + slug: [hpe-eia-chassis] + default: + slug: hpe-eia-chassis +--- +manufacturer: HPE model: Standard/EIA Chassis hardware-type: Chassis slug: hpe-eia-chassis diff --git a/pkg/hardwaretypes/hardware-types/hpe-node-crayex-ex425.yaml b/pkg/hardwaretypes/hardware-types/hpe-node-crayex-ex425.yaml index c265d53f..ad736789 100644 --- a/pkg/hardwaretypes/hardware-types/hpe-node-crayex-ex425.yaml +++ b/pkg/hardwaretypes/hardware-types/hpe-node-crayex-ex425.yaml @@ -14,6 +14,12 @@ device-bays: slug: [hpe-crayex-ex425-compute-blade-windom-node-card] default: slug: hpe-crayex-ex425-compute-blade-windom-node-card +identifications: + # In CSM we can learn the type of a node blade based off the contents of the NodeEnclosure + - manufacturer: HPE + model: WNC + - manufacturer: HPE + model: WindomNodeCard --- manufacturer: HPE model: EX425 AMD EPYC compute Blade, Windom Node Card (WNC) diff --git a/pkg/hardwaretypes/library.go b/pkg/hardwaretypes/library.go index 346622ca..a3166cf9 100644 --- a/pkg/hardwaretypes/library.go +++ b/pkg/hardwaretypes/library.go @@ -206,7 +206,7 @@ func (l *Library) GetDefaultHardwareBuildOut(deviceTypeString string, deviceOrdi log.Debug().Msgf("Visiting: %s", current.DeviceTypeString) currentDeviceType, ok := l.DeviceTypes[current.DeviceTypeString] if !ok { - panic(fmt.Sprint("Device type does not exist", current.DeviceType)) + return nil, fmt.Errorf("device type (%v) does not exist", current.DeviceTypeString) } // Retrieve the hardware type at this point in time, so we only lookup in the map once @@ -214,9 +214,9 @@ func (l *Library) GetDefaultHardwareBuildOut(deviceTypeString string, deviceOrdi current.HardwareTypePath = append(current.HardwareTypePath, current.DeviceType.HardwareType) for _, deviceBay := range currentDeviceType.DeviceBays { - log.Debug().Msgf("Device bay: %s", deviceBay.Name) + log.Debug().Msgf(" Device bay: %s", deviceBay.Name) if deviceBay.Default != nil { - log.Debug().Msgf("Default: %s", deviceBay.Default.Slug) + log.Debug().Msgf(" Default: %s", deviceBay.Default.Slug) // Extract the ordinal // This is one way of going about it, but it assumes that each name has a number @@ -225,7 +225,6 @@ // - Get all of the device bays with that type, and then sort them lexicographically. 
This is how HSM does it, but assumes the names can be sorted in a predictable order r := regexp.MustCompile(`\d+`) match := r.FindString(deviceBay.Name) - log.Debug().Msgf("%s|%s\n", deviceBay.Name, match) var ordinal int if match != "" { diff --git a/pkg/hardwaretypes/types.go b/pkg/hardwaretypes/types.go index 5c9e5be0..150ff5d2 100644 --- a/pkg/hardwaretypes/types.go +++ b/pkg/hardwaretypes/types.go @@ -110,7 +110,13 @@ type DeviceType struct { // PowerPowers []PowerPower `yaml:"power-ports"` // PowerOutlets []PowerOutlets `yaml:"power-outlets"` - DeviceBays []DeviceBay `yaml:"device-bays"` + DeviceBays []DeviceBay `yaml:"device-bays"` + Identifications []Identification `yaml:"identifications"` +} + +type Identification struct { + Manufacturer string `yaml:"manufacturer"` + Model string `yaml:"model"` } type DeviceBay struct { diff --git a/pkg/hsm-client/.swagger-codegen-ignore b/pkg/hsm-client/.swagger-codegen-ignore index c5fa491b..4e36aea9 100644 --- a/pkg/hsm-client/.swagger-codegen-ignore +++ b/pkg/hsm-client/.swagger-codegen-ignore @@ -21,3 +21,5 @@ #docs/*.md # Then explicitly reverse the ignore rule for a single file: #!docs/README.md + +model_hw_inventory_1_0_0_hw_inventory_by_fru.go diff --git a/pkg/hsm-client/.swagger-codegen/VERSION b/pkg/hsm-client/.swagger-codegen/VERSION index 34ec317a..74b4a508 100644 --- a/pkg/hsm-client/.swagger-codegen/VERSION +++ b/pkg/hsm-client/.swagger-codegen/VERSION @@ -1 +1 @@ -3.0.42 \ No newline at end of file +3.0.43 \ No newline at end of file diff --git a/pkg/hsm-client/README.md b/pkg/hsm-client/README.md index ffa9cda7..4d359aa5 100644 --- a/pkg/hsm-client/README.md +++ b/pkg/hsm-client/README.md @@ -1,4 +1,4 @@ -# Go API client for swagger +# Go API client for hsm_client The Hardware State Manager (HSM) inventories, monitors, and manages hardware, and tracks the logical and dynamic component states, such as roles, NIDs, and other basic metadata needed to provide most common administrative and operational functions. HSM is the single source of truth for the state of the system. It contains the component state and information on Redfish endpoints for communicating with components via Redfish. It also allows administrators to create partitions and groups for other uses. ## Resources ### /State/Components HMS components are created during inventory discovery and provide a higher-level representation of the component, including state, NID, role (i.e. compute/service), subtype, and so on. Unlike ComponentEndpoints, however, they are not strictly linked to the parent RedfishEndpoint, and are not automatically deleted when the RedfishEndpoints are (though they can be deleted via a separate call). This is because these components can also represent abstract components, such as removed components (e.g. which would remain, but have their states changed to \"Empty\" upon removal). ### /Defaults/NodeMaps This resource allows a mapping file (NodeMaps) to be uploaded that maps node xnames to Node IDs, and optionally, to roles and subroles. These mappings are used when discovering nodes for the first time. These mappings should be uploaded prior to discovery and should contain mappings for each valid node xname in the system, whether populated or not. Nodemap is a JSON file that contains the xname of the node, node ID, and optionally role and subrole. Role can be Compute, Application, Storage, Management etc. The NodeMaps collection can be uploaded to HSM automatically at install time by specifying it as a JSON file. 
As a result, the endpoints are then automatically discovered by REDS, and inventory discovery is performed by HSM. The desired NID numbers will be set as soon as the nodes are created using the NodeMaps collection. It is recommended that Nodemaps are uploaded at install time before discovery happens. If they are uploaded after discovery, then the node xnames need to be manually updated with the correct NIDs. You can update NIDs for individual components by using PATCH /State/Components/{xname}/NID. ### /Inventory/Hardware This resource shows the hardware inventory of the entire system and contains FRU information in location. All entries are displayed as a flat array. ### /Inventory/HardwareByFRU Every component has FRU information. This resource shows the hardware inventory for all FRUs or for a specific FRU irrespective of the location. This information is constant regardless of where the hardware item is currently in the system. If a HWInventoryByLocation entry is currently populated with a piece of hardware, it will have the corresponding HWInventoryByFRU object embedded. This FRU info can also be looked up by FRU ID regardless of the current location. ### /Inventory/Hardware/Query/{xname} This resource gets you information about a specific component and it's sub-components. The xname can be a component, partition, ALL, or s0. Both ALL and s0 represent the entire system. ### /Inventory/RedfishEndpoints This is a BMC or other Redfish controller that has a Redfish entry point and Redfish service root. It is used to discover the components managed by this endpoint during discovery and handles all Redfish interactions by these subcomponents. If the endpoint has been discovered, this entry will include the ComponentEndpoint entries for these managed subcomponents. You can also create a Redfish Endpoint or update the definition for a Redfish Endpoint. The xname identifies the location of all components in the system, including chassis, controllers, nodes, and so on. Redfish endpoints are given to State Manager. ### /Inventory/ComponentEndpoints Component Endpoints are the specific URLs for each individual component that are under the Redfish endpoint. Component endpoints are discovered during inventory discovery. They are the management-plane representation of system components and are linked to the parent Redfish Endpoint. They provide a glue layer to bridge the higher-level representation of a component with how it is represented locally by Redfish. The collection of ComponentEndpoints can be obtained in full, optionally filtered on certain criteria (e.g. obtain just Node components), or accessed by their xname IDs individually. ### /Inventory/ServiceEndpoints ServiceEndpoints help you do things on Redfish like updating the firmware. They are discovered during inventory discovery. ### /groups Groups are named sets of system components, most commonly nodes. A group groups components under an administratively chosen label (group name). Each component may belong to any number of groups. If a group has exclusiveGroup= set, then a node may only be a member of one group that matches that exclusive label. For example, if the exclusive group label 'colors' is associated with groups 'blue', 'red', and 'green', then a component that is part of 'green' could not also be placed in 'red'. You can create, modify, or delete a group and its members. You can also use group names as filters for API calls. 
### /partitions A partition is a formal, non-overlapping division of the system that forms an administratively distinct sub-system. Each component may belong to at most one partition. Partitions are used as an access control mechanism or for implementing multi-tenancy. You can create, modify, or delete a partition and its members. You can also use partitions as filters for other API calls. ### /memberships A membership shows the association of a component xname to its set of group labels and partition names. There can be many group labels and up to one partition per component. Memberships are not modified directly, as the underlying group or partition is modified instead. A component can be removed from one of the listed groups or partitions or added via POST as well as being present in the initial set of members when a partition or group is created. You can retrieve the memberships for components or memberships for a specific xname. ### /Inventory/DiscoveryStatus Check discovery status for all components or you can track the status for a specific job ID. You can also check per-endpoint discover status for each RedfishEndpoint. Contains status information about the discovery operation for clients to query. The discover operation returns a link or links to status objects so that a client can determine when the discovery operation is complete. ### /Inventory/Discover Discover subcomponents by querying all RedfishEndpoints. Once the RedfishEndpoint objects are created, inventory discovery will query these controllers and create or update management plane and managed plane objects representing the components (e.g. nodes, node enclosures, node cards for Mountain chassis CMM endpoints). ### /Subscriptions/SCN Manage subscriptions to state change notifications (SCNs) from HSM. You can also subscribe to state change notifications by using the HMS Notification Fanout Daemon API. ## Workflows ### Add and Delete a Redfish Endpoint #### POST /Inventory/RedfishEndpoints When you manually create Redfish endpoints, the discovery is automatically initiated. You would create Redfish endpoints for components that are not automatically discovered by REDS or MEDS. #### GET /Inventory/RedfishEndpoints Check the Redfish endpoints that have been added and check the status of discovery. #### DELETE /Inventory/RedfishEndpoints/{xname} Delete a specific Redfish endpoint. ### Perform Inventory Discovery #### POST /Inventory/Discover Start inventory discovery of a system's subcomponents by querying all Redfish endpoints. If needed, specify an ID or hostname (xname) in the payload. #### GET /Inventory/DiscoveryStatus Check the discovery status of all Redfish endpoints. You can also check the discovery status for each individual component by providing ID. ### Query and Update HMS Components (State/NID) #### GET /State/Components Retrieve all HMS Components found by inventory discovery as a named (\"Components\") array. #### PATCH /State/Components/{xname}/Enabled Modify the component's Enabled field. #### DELETE /State/Components/{xname} Delete a specific HMS component by providing its xname. As noted, components are not automatically deleted when RedfishEndpoints or ComponentEndpoints are deleted. ### Create and Delete a New Group #### GET /hsm/v2/State/Components Retrieve a list of desired components and their state. Select the nodes that you want to group. #### POST /groups Create the new group with desired members. Provide a group label (required), description, name, members etc. in the JSON payload. 
#### GET /groups/{group_label} Retrieve the group that was created with the label. #### GET /State/Components/{group_label} Retrieve the current state for all the components in the group. #### DELETE /groups/{group_label} Delete the group specified by {group_label}. ## Valid State Transitions ``` Prior State -> New State - Reason Ready -> Standby - HBTD if node has many missed heartbeats Ready -> Ready/Warning - HBTD if node has a few missed heartbeats Standby -> Ready - HBTD Node re-starts heartbeating On -> Ready - HBTD Node started heartbeating Off -> Ready - HBTD sees heartbeats before Redfish Event (On) Standby -> On - Redfish Event (On) or if re-discovered while in the standby state Off -> On - Redfish Event (On) Standby -> Off - Redfish Event (Off) Ready -> Off - Redfish Event (Off) On -> Off - Redfish Event (Off) Any State -> Empty - Redfish Endpoint is disabled meaning component removal ``` Generally, nodes transition 'Off' -> 'On' -> 'Ready' when going from 'Off' to booted, and 'Ready' -> 'Ready/Warning' -> 'Standby' -> 'Off' when shutdown. @@ -12,7 +12,7 @@ This API client was generated by the [swagger-codegen](https://github.com/swagge ## Installation Put the package under your project folder and add the following in import: ```golang -import "./swagger" +import "./hsm_client" ``` ## Documentation for API Endpoints @@ -274,6 +274,8 @@ Class | Method | HTTP request | Description - [HwInvByFruComputeModule](docs/HwInvByFruComputeModule.md) - [HwInvByFruDrive](docs/HwInvByFruDrive.md) - [HwInvByFruMemory](docs/HwInvByFruMemory.md) + - [HwInvByFruMgmtHlSwitch](docs/HwInvByFruMgmtHlSwitch.md) + - [HwInvByFruMgmtSwitch](docs/HwInvByFruMgmtSwitch.md) - [HwInvByFruNode](docs/HwInvByFruNode.md) - [HwInvByFruNodeAccel](docs/HwInvByFruNodeAccel.md) - [HwInvByFruNodeAccelRiser](docs/HwInvByFruNodeAccelRiser.md) @@ -284,11 +286,13 @@ Class | Method | HTTP request | Description - [HwInvByFruProcessor](docs/HwInvByFruProcessor.md) - [HwInvByFruRouterBmc](docs/HwInvByFruRouterBmc.md) - [HwInvByFruRouterModule](docs/HwInvByFruRouterModule.md) + - [HwInvByFrucduMgmtSwitch](docs/HwInvByFrucduMgmtSwitch.md) - [HwInvByFrucmmRectifier](docs/HwInvByFrucmmRectifier.md) - [HwInvByFruhsnBoard](docs/HwInvByFruhsnBoard.md) - [HwInvByFruhsnnic](docs/HwInvByFruhsnnic.md) - [HwInvByFrupdu](docs/HwInvByFrupdu.md) - [HwInvByLocCabinet](docs/HwInvByLocCabinet.md) + - [HwInvByLocCduMgmtSwitch](docs/HwInvByLocCduMgmtSwitch.md) - [HwInvByLocChassis](docs/HwInvByLocChassis.md) - [HwInvByLocCmmRectifier](docs/HwInvByLocCmmRectifier.md) - [HwInvByLocComputeModule](docs/HwInvByLocComputeModule.md) @@ -296,6 +300,8 @@ Class | Method | HTTP request | Description - [HwInvByLocHsnBoard](docs/HwInvByLocHsnBoard.md) - [HwInvByLocHsnnic](docs/HwInvByLocHsnnic.md) - [HwInvByLocMemory](docs/HwInvByLocMemory.md) + - [HwInvByLocMgmtHlSwitch](docs/HwInvByLocMgmtHlSwitch.md) + - [HwInvByLocMgmtSwitch](docs/HwInvByLocMgmtSwitch.md) - [HwInvByLocNode](docs/HwInvByLocNode.md) - [HwInvByLocNodeAccel](docs/HwInvByLocNodeAccel.md) - [HwInvByLocNodeAccelRiser](docs/HwInvByLocNodeAccelRiser.md) diff --git a/pkg/hsm-client/api/swagger.yaml b/pkg/hsm-client/api/swagger.yaml index 0cb45800..356cc62b 100644 --- a/pkg/hsm-client/api/swagger.yaml +++ b/pkg/hsm-client/api/swagger.yaml @@ -547,6 +547,9 @@ paths: - NodeEnclosure - NodeEnclosurePowerSupply - HSNBoard + - MgmtSwitch + - MgmtHLSwitch + - CDUMgmtSwitch - Node - Processor - Drive @@ -1837,6 +1840,9 @@ paths: - NodeEnclosure - NodeEnclosurePowerSupply - HSNBoard + - MgmtSwitch + - 
MgmtHLSwitch + - CDUMgmtSwitch - Node - Processor - Drive @@ -2485,6 +2491,9 @@ paths: - NodeEnclosure - NodeEnclosurePowerSupply - HSNBoard + - MgmtSwitch + - MgmtHLSwitch + - CDUMgmtSwitch - Node - Processor - Drive @@ -3186,6 +3195,9 @@ paths: - NodeEnclosure - NodeEnclosurePowerSupply - HSNBoard + - MgmtSwitch + - MgmtHLSwitch + - CDUMgmtSwitch - Node - Processor - Drive @@ -3414,6 +3426,9 @@ paths: - NodeEnclosure - NodeEnclosurePowerSupply - HSNBoard + - MgmtSwitch + - MgmtHLSwitch + - CDUMgmtSwitch - Node - Processor - Drive @@ -3989,6 +4004,9 @@ paths: - NodeEnclosure - NodeEnclosurePowerSupply - HSNBoard + - MgmtSwitch + - MgmtHLSwitch + - CDUMgmtSwitch - Node - Processor - Drive @@ -4630,6 +4648,9 @@ paths: - NodeEnclosure - NodeEnclosurePowerSupply - HSNBoard + - MgmtSwitch + - MgmtHLSwitch + - CDUMgmtSwitch - Node - Processor - Drive @@ -4670,7 +4691,7 @@ paths: explode: true schema: type: string - - name: laststatus + - name: lastdiscoverystatus in: query description: "Retrieve the RedfishEndpoints with the given discovery status.\ \ This can be negated (i.e. !DiscoverOK). Valid values are: EndpointInvalid,\ @@ -5161,6 +5182,9 @@ paths: - NodeEnclosure - NodeEnclosurePowerSupply - HSNBoard + - MgmtSwitch + - MgmtHLSwitch + - CDUMgmtSwitch - Node - Processor - Drive @@ -6750,7 +6774,7 @@ paths: $ref: '#/components/schemas/Subscriptions_SCNPostSubscription' required: true responses: - "200": + "204": description: Success. The subscription has been overwritten. content: {} "400": @@ -8023,6 +8047,9 @@ paths: - NodeEnclosure - NodeEnclosurePowerSupply - HSNBoard + - MgmtSwitch + - MgmtHLSwitch + - CDUMgmtSwitch - Node - Processor - Drive @@ -9627,6 +9654,27 @@ components: readOnly: true items: $ref: '#/components/schemas/HWInvByLocHSNBoard' + MgmtSwitches: + type: array + description: All appropriate components with HMS type 'MgmtSwitch' given + Target component/partition and query type. + readOnly: true + items: + $ref: '#/components/schemas/HWInvByLocMgmtSwitch' + MgmtHLSwitches: + type: array + description: All appropriate components with HMS type 'MgmtHLSwitch' given + Target component/partition and query type. + readOnly: true + items: + $ref: '#/components/schemas/HWInvByLocMgmtHLSwitch' + CDUMgmtSwitches: + type: array + description: All appropriate components with HMS type 'CDUMgmtSwitch' given + Target component/partition and query type. + readOnly: true + items: + $ref: '#/components/schemas/HWInvByLocCDUMgmtSwitch' Nodes: type: array description: All appropriate components with HMS type 'Node' given Target @@ -9730,6 +9778,9 @@ components: HSNBoards: - "" - "" + CDUMgmtSwitches: + - "" + - "" CabinetPDUs: - "" - "" @@ -9748,6 +9799,9 @@ components: ComputeModules: - "" - "" + MgmtHLSwitches: + - "" + - "" Drives: - "" - "" @@ -9779,6 +9833,9 @@ components: Cabinets: - "" - "" + MgmtSwitches: + - "" + - "" Processors: - "" - "" @@ -9821,6 +9878,9 @@ components: - HWInvByLocRouterModule - HWInvByLocNodeEnclosure - HWInvByLocHSNBoard + - HWInvByLocMgmtSwitch + - HWInvByLocMgmtHLSwitch + - HWInvByLocCDUMgmtSwitch - HWInvByLocNode - HWInvByLocProcessor - HWInvByLocNodeAccel @@ -9963,6 +10023,39 @@ components: properties: HSNBoardLocationInfo: $ref: '#/components/schemas/HWInventory.1.0.0_RedfishChassisLocationInfo' + HWInvByLocMgmtSwitch: + description: "This is a subtype of HWInventoryByLocation for HMSType MgmtSwitch.\ + \ It represents a management switch. 
It is selected via the 'discriminator:\ + \ HWInventoryByLocationType' of HWInventoryByLocation when HWInventoryByLocationType\ + \ is 'HWInvByLocMgmtSwitch'." + allOf: + - $ref: '#/components/schemas/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + MgmtSwitchLocationInfo: + $ref: '#/components/schemas/HWInventory.1.0.0_RedfishChassisLocationInfo' + HWInvByLocMgmtHLSwitch: + description: "This is a subtype of HWInventoryByLocation for HMSType MgmtHLSwitch.\ + \ It represents a high level management switch. It is selected via the 'discriminator:\ + \ HWInventoryByLocationType' of HWInventoryByLocation when HWInventoryByLocationType\ + \ is 'HWInvByLocMgmtHLSwitch'." + allOf: + - $ref: '#/components/schemas/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + MgmtHLSwitchLocationInfo: + $ref: '#/components/schemas/HWInventory.1.0.0_RedfishChassisLocationInfo' + HWInvByLocCDUMgmtSwitch: + description: "This is a subtype of HWInventoryByLocation for HMSType CDUMgmtSwitch.\ + \ It represents a CDU management switch. It is selected via the 'discriminator:\ + \ HWInventoryByLocationType' of HWInventoryByLocation when HWInventoryByLocationType\ + \ is 'HWInvByLocCDUMgmtSwitch'." + allOf: + - $ref: '#/components/schemas/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + CDUMgmtSwitchLocationInfo: + $ref: '#/components/schemas/HWInventory.1.0.0_RedfishChassisLocationInfo' HWInvByLocNode: description: "This is a subtype of HWInventoryByLocation for HMSType Node. It\ \ represents a service, compute, or system node. It is selected via the 'discriminator:\ @@ -10526,6 +10619,9 @@ components: - HWInvByFRURouterModule - HWInvByFRUNodeEnclosure - HWInvByFRUHSNBoard + - HWInvByFRUMgmtSwitch + - HWInvByFRUMgmtHLSwitch + - HWInvByFRUCDUMgmtSwitch - HWInvByFRUNode - HWInvByFRUProcessor - HWInvByFRUNodeAccel @@ -10626,6 +10722,36 @@ components: properties: HSNBoardFRUInfo: $ref: '#/components/schemas/HWInventory.1.0.0_RedfishChassisFRUInfo' + HWInvByFRUMgmtSwitch: + description: "This is a subtype of HWInventoryByFRU for HMSType MgmtSwitch.\ + \ It represents a management switch. It is selected via the 'discriminator:\ + \ HWInventoryByFRUType' of HWInventoryByFRU when HWInventoryByFRUType is 'HWInvByFRUMgmtSwitch'." + allOf: + - $ref: '#/components/schemas/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + MgmtSwitchFRUInfo: + $ref: '#/components/schemas/HWInventory.1.0.0_RedfishChassisFRUInfo' + HWInvByFRUMgmtHLSwitch: + description: "This is a subtype of HWInventoryByFRU for HMSType MgmtHLSwitch.\ + \ It represents a high level management switch. It is selected via the 'discriminator:\ + \ HWInventoryByFRUType' of HWInventoryByFRU when HWInventoryByFRUType is 'HWInvByFRUMgmtHLSwitch'." + allOf: + - $ref: '#/components/schemas/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + MgmtHLSwitchFRUInfo: + $ref: '#/components/schemas/HWInventory.1.0.0_RedfishChassisFRUInfo' + HWInvByFRUCDUMgmtSwitch: + description: "This is a subtype of HWInventoryByFRU for HMSType CDUMgmtSwitch.\ + \ It represents a CDU management switch. It is selected via the 'discriminator:\ + \ HWInventoryByFRUType' of HWInventoryByFRU when HWInventoryByFRUType is 'HWInvByFRUCDUMgmtSwitch'." 
+ allOf: + - $ref: '#/components/schemas/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + CDUMgmtSwitchFRUInfo: + $ref: '#/components/schemas/HWInventory.1.0.0_RedfishChassisFRUInfo' HWInvByFRUNode: description: "This is a subtype of HWInventoryByFRU for HMSType Node. It represents\ \ a service, compute, or system node. It is selected via the 'discriminator:\ @@ -11579,9 +11705,9 @@ components: MacRequired: true Name: Name DiscoveryInfo: - LastStatus: EndpointInvalid + LastDiscoveryAttempt: 2000-01-23T04:56:07.000+00:00 + LastDiscoveryStatus: EndpointInvalid RedfishVersion: RedfishVersion - LastAttempt: 2000-01-23T04:56:07.000+00:00 Type: Node FQDN: FQDN UseSSDP: true @@ -11632,9 +11758,9 @@ components: MacRequired: true Name: Name DiscoveryInfo: - LastStatus: EndpointInvalid + LastDiscoveryAttempt: 2000-01-23T04:56:07.000+00:00 + LastDiscoveryStatus: EndpointInvalid RedfishVersion: RedfishVersion - LastAttempt: 2000-01-23T04:56:07.000+00:00 Type: Node FQDN: FQDN UseSSDP: true @@ -11652,9 +11778,9 @@ components: MacRequired: true Name: Name DiscoveryInfo: - LastStatus: EndpointInvalid + LastDiscoveryAttempt: 2000-01-23T04:56:07.000+00:00 + LastDiscoveryStatus: EndpointInvalid RedfishVersion: RedfishVersion - LastAttempt: 2000-01-23T04:56:07.000+00:00 Type: Node FQDN: FQDN UseSSDP: true @@ -13378,6 +13504,9 @@ components: - NodeEnclosure - NodeEnclosurePowerSupply - HSNBoard + - MgmtSwitch + - MgmtHLSwitch + - CDUMgmtSwitch - Node - Processor - Drive @@ -13746,12 +13875,12 @@ components: RedfishEndpoint.1.0.0_RedfishEndpoint_DiscoveryInfo: type: object properties: - LastAttempt: + LastDiscoveryAttempt: type: string description: The time the last discovery attempt took place. format: date-time readOnly: true - LastStatus: + LastDiscoveryStatus: type: string description: Describes the outcome of the last discovery attempt. readOnly: true @@ -13770,9 +13899,9 @@ components: description: Contains info about the discovery status of the given endpoint. readOnly: true example: - LastStatus: EndpointInvalid + LastDiscoveryAttempt: 2000-01-23T04:56:07.000+00:00 + LastDiscoveryStatus: EndpointInvalid RedfishVersion: RedfishVersion - LastAttempt: 2000-01-23T04:56:07.000+00:00 Actions_1.0.0_ChassisActions_Chassis.Reset: type: object properties: @@ -13957,6 +14086,9 @@ components: - NodeEnclosure - NodeEnclosurePowerSupply - HSNBoard + - MgmtSwitch + - MgmtHLSwitch + - CDUMgmtSwitch - Node - Processor - Drive diff --git a/pkg/hsm-client/api_redfish_endpoint.go b/pkg/hsm-client/api_redfish_endpoint.go index 833909df..282b2834 100644 --- a/pkg/hsm-client/api_redfish_endpoint.go +++ b/pkg/hsm-client/api_redfish_endpoint.go @@ -743,18 +743,18 @@ Retrieve all Redfish endpoint entries as a named array, optionally filtering it. * @param "Uuid" (optional.String) - Retrieve the RedfishEndpoint with the given UUID. * @param "Macaddr" (optional.String) - Retrieve the RedfishEndpoint with the given MAC address. * @param "Ipaddress" (optional.String) - Retrieve the RedfishEndpoint with the given IP address. A blank string will get Redfish endpoints without IP addresses. - * @param "Laststatus" (optional.String) - Retrieve the RedfishEndpoints with the given discovery status. This can be negated (i.e. !DiscoverOK). Valid values are: EndpointInvalid, EPResponseFailedDecode, HTTPsGetFailed, NotYetQueried, VerificationFailed, ChildVerificationFailed, DiscoverOK + * @param "Lastdiscoverystatus" (optional.String) - Retrieve the RedfishEndpoints with the given discovery status. This can be negated (i.e. 
!DiscoverOK). Valid values are: EndpointInvalid, EPResponseFailedDecode, HTTPsGetFailed, NotYetQueried, VerificationFailed, ChildVerificationFailed, DiscoverOK @return RedfishEndpointArrayRedfishEndpointArray */ type RedfishEndpointApiDoRedfishEndpointsGetOpts struct { - Id optional.String - Fqdn optional.String - Type_ optional.String - Uuid optional.String - Macaddr optional.String - Ipaddress optional.String - Laststatus optional.String + Id optional.String + Fqdn optional.String + Type_ optional.String + Uuid optional.String + Macaddr optional.String + Ipaddress optional.String + Lastdiscoverystatus optional.String } func (a *RedfishEndpointApiService) DoRedfishEndpointsGet(ctx context.Context, localVarOptionals *RedfishEndpointApiDoRedfishEndpointsGetOpts) (RedfishEndpointArrayRedfishEndpointArray, *http.Response, error) { @@ -791,8 +791,8 @@ func (a *RedfishEndpointApiService) DoRedfishEndpointsGet(ctx context.Context, l if localVarOptionals != nil && localVarOptionals.Ipaddress.IsSet() { localVarQueryParams.Add("ipaddress", parameterToString(localVarOptionals.Ipaddress.Value(), "")) } - if localVarOptionals != nil && localVarOptionals.Laststatus.IsSet() { - localVarQueryParams.Add("laststatus", parameterToString(localVarOptionals.Laststatus.Value(), "")) + if localVarOptionals != nil && localVarOptionals.Lastdiscoverystatus.IsSet() { + localVarQueryParams.Add("lastdiscoverystatus", parameterToString(localVarOptionals.Lastdiscoverystatus.Value(), "")) } // to determine the Content-Type header localVarHttpContentTypes := []string{} diff --git a/pkg/hsm-client/client.go b/pkg/hsm-client/client.go index 769653ae..4f2005d3 100644 --- a/pkg/hsm-client/client.go +++ b/pkg/hsm-client/client.go @@ -419,7 +419,7 @@ func (c *APIClient) decode(v interface{}, b []byte, contentType string) (err err } return nil } - return fmt.Errorf("unsupported content type: %s", contentType) + return errors.New("undefined response type") } // Add a file to the multipart request diff --git a/pkg/hsm-client/docs/HwInvByFruMgmtHlSwitch.md b/pkg/hsm-client/docs/HwInvByFruMgmtHlSwitch.md new file mode 100644 index 00000000..709f3f5a --- /dev/null +++ b/pkg/hsm-client/docs/HwInvByFruMgmtHlSwitch.md @@ -0,0 +1,13 @@ +# HwInvByFruMgmtHlSwitch + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**FRUID** | **string** | | [optional] [default to null] +**Type_** | [***HmsType100**](HMSType.1.0.0.md) | | [optional] [default to null] +**FRUSubtype** | **string** | TBD. | [optional] [default to null] +**HWInventoryByFRUType** | **string** | This is used as a discriminator to determine the additional HMS-type specific subtype that is returned. 
| [default to null] +**MgmtHLSwitchFRUInfo** | [***HwInventory100RedfishChassisFruInfo**](HWInventory.1.0.0_RedfishChassisFRUInfo.md) | | [optional] [default to null] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + diff --git a/pkg/hsm-client/docs/HwInvByFruMgmtSwitch.md b/pkg/hsm-client/docs/HwInvByFruMgmtSwitch.md new file mode 100644 index 00000000..62efada8 --- /dev/null +++ b/pkg/hsm-client/docs/HwInvByFruMgmtSwitch.md @@ -0,0 +1,13 @@ +# HwInvByFruMgmtSwitch + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**FRUID** | **string** | | [optional] [default to null] +**Type_** | [***HmsType100**](HMSType.1.0.0.md) | | [optional] [default to null] +**FRUSubtype** | **string** | TBD. | [optional] [default to null] +**HWInventoryByFRUType** | **string** | This is used as a discriminator to determine the additional HMS-type specific subtype that is returned. | [default to null] +**MgmtSwitchFRUInfo** | [***HwInventory100RedfishChassisFruInfo**](HWInventory.1.0.0_RedfishChassisFRUInfo.md) | | [optional] [default to null] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + diff --git a/pkg/hsm-client/docs/HwInvByFrucduMgmtSwitch.md b/pkg/hsm-client/docs/HwInvByFrucduMgmtSwitch.md new file mode 100644 index 00000000..e7815b27 --- /dev/null +++ b/pkg/hsm-client/docs/HwInvByFrucduMgmtSwitch.md @@ -0,0 +1,13 @@ +# HwInvByFrucduMgmtSwitch + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**FRUID** | **string** | | [optional] [default to null] +**Type_** | [***HmsType100**](HMSType.1.0.0.md) | | [optional] [default to null] +**FRUSubtype** | **string** | TBD. | [optional] [default to null] +**HWInventoryByFRUType** | **string** | This is used as a discriminator to determine the additional HMS-type specific subtype that is returned. | [default to null] +**CDUMgmtSwitchFRUInfo** | [***HwInventory100RedfishChassisFruInfo**](HWInventory.1.0.0_RedfishChassisFRUInfo.md) | | [optional] [default to null] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + diff --git a/pkg/hsm-client/docs/HwInvByLocCduMgmtSwitch.md b/pkg/hsm-client/docs/HwInvByLocCduMgmtSwitch.md new file mode 100644 index 00000000..72a4c0f9 --- /dev/null +++ b/pkg/hsm-client/docs/HwInvByLocCduMgmtSwitch.md @@ -0,0 +1,15 @@ +# HwInvByLocCduMgmtSwitch + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**ID** | **string** | | [default to null] +**Type_** | [***HmsType100**](HMSType.1.0.0.md) | | [optional] [default to null] +**Ordinal** | **int32** | This is the normalized (from zero) index of the component location (e.g. slot number) when there are more than one. This should match the last number in the xname in most cases (e.g. Ordinal 0 for node x0c0s0b0n0). Note that Redfish may use a different value or naming scheme, but this is passed through via the *LocationInfo for the type of component. | [optional] [default to null] +**Status** | **string** | Populated or Empty - whether location is populated. 
| [optional] [default to null] +**HWInventoryByLocationType** | **string** | This is used as a discriminator to determine the additional HMS-type specific subtype that is returned. | [default to null] +**PopulatedFRU** | [***HwInventory100HwInventoryByFru**](HWInventory.1.0.0_HWInventoryByFRU.md) | | [optional] [default to null] +**CDUMgmtSwitchLocationInfo** | [***HwInventory100RedfishChassisLocationInfo**](HWInventory.1.0.0_RedfishChassisLocationInfo.md) | | [optional] [default to null] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + diff --git a/pkg/hsm-client/docs/HwInvByLocMgmtHlSwitch.md b/pkg/hsm-client/docs/HwInvByLocMgmtHlSwitch.md new file mode 100644 index 00000000..591e7b23 --- /dev/null +++ b/pkg/hsm-client/docs/HwInvByLocMgmtHlSwitch.md @@ -0,0 +1,15 @@ +# HwInvByLocMgmtHlSwitch + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**ID** | **string** | | [default to null] +**Type_** | [***HmsType100**](HMSType.1.0.0.md) | | [optional] [default to null] +**Ordinal** | **int32** | This is the normalized (from zero) index of the component location (e.g. slot number) when there are more than one. This should match the last number in the xname in most cases (e.g. Ordinal 0 for node x0c0s0b0n0). Note that Redfish may use a different value or naming scheme, but this is passed through via the *LocationInfo for the type of component. | [optional] [default to null] +**Status** | **string** | Populated or Empty - whether location is populated. | [optional] [default to null] +**HWInventoryByLocationType** | **string** | This is used as a discriminator to determine the additional HMS-type specific subtype that is returned. | [default to null] +**PopulatedFRU** | [***HwInventory100HwInventoryByFru**](HWInventory.1.0.0_HWInventoryByFRU.md) | | [optional] [default to null] +**MgmtHLSwitchLocationInfo** | [***HwInventory100RedfishChassisLocationInfo**](HWInventory.1.0.0_RedfishChassisLocationInfo.md) | | [optional] [default to null] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + diff --git a/pkg/hsm-client/docs/HwInvByLocMgmtSwitch.md b/pkg/hsm-client/docs/HwInvByLocMgmtSwitch.md new file mode 100644 index 00000000..d0c880f8 --- /dev/null +++ b/pkg/hsm-client/docs/HwInvByLocMgmtSwitch.md @@ -0,0 +1,15 @@ +# HwInvByLocMgmtSwitch + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**ID** | **string** | | [default to null] +**Type_** | [***HmsType100**](HMSType.1.0.0.md) | | [optional] [default to null] +**Ordinal** | **int32** | This is the normalized (from zero) index of the component location (e.g. slot number) when there are more than one. This should match the last number in the xname in most cases (e.g. Ordinal 0 for node x0c0s0b0n0). Note that Redfish may use a different value or naming scheme, but this is passed through via the *LocationInfo for the type of component. | [optional] [default to null] +**Status** | **string** | Populated or Empty - whether location is populated. | [optional] [default to null] +**HWInventoryByLocationType** | **string** | This is used as a discriminator to determine the additional HMS-type specific subtype that is returned. 
| [default to null] +**PopulatedFRU** | [***HwInventory100HwInventoryByFru**](HWInventory.1.0.0_HWInventoryByFRU.md) | | [optional] [default to null] +**MgmtSwitchLocationInfo** | [***HwInventory100RedfishChassisLocationInfo**](HWInventory.1.0.0_RedfishChassisLocationInfo.md) | | [optional] [default to null] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + diff --git a/pkg/hsm-client/docs/HwInventory100HwInventory.md b/pkg/hsm-client/docs/HwInventory100HwInventory.md index 4b5eb59a..b77119b8 100644 --- a/pkg/hsm-client/docs/HwInventory100HwInventory.md +++ b/pkg/hsm-client/docs/HwInventory100HwInventory.md @@ -11,6 +11,9 @@ Name | Type | Description | Notes **RouterModules** | [**[]HwInvByLocRouterModule**](HWInvByLocRouterModule.md) | All appropriate components with HMS type 'RouterModule' given Target component/partition and query type. | [optional] [default to null] **NodeEnclosures** | [**[]HwInvByLocNodeEnclosure**](HWInvByLocNodeEnclosure.md) | All appropriate components with HMS type 'NodeEnclosure' given Target component/partition and query type. | [optional] [default to null] **HSNBoards** | [**[]HwInvByLocHsnBoard**](HWInvByLocHSNBoard.md) | All appropriate components with HMS type 'HSNBoard' given Target component/partition and query type. | [optional] [default to null] +**MgmtSwitches** | [**[]HwInvByLocMgmtSwitch**](HWInvByLocMgmtSwitch.md) | All appropriate components with HMS type 'MgmtSwitch' given Target component/partition and query type. | [optional] [default to null] +**MgmtHLSwitches** | [**[]HwInvByLocMgmtHlSwitch**](HWInvByLocMgmtHLSwitch.md) | All appropriate components with HMS type 'MgmtHLSwitch' given Target component/partition and query type. | [optional] [default to null] +**CDUMgmtSwitches** | [**[]HwInvByLocCduMgmtSwitch**](HWInvByLocCDUMgmtSwitch.md) | All appropriate components with HMS type 'CDUMgmtSwitch' given Target component/partition and query type. | [optional] [default to null] **Nodes** | [**[]HwInvByLocNode**](HWInvByLocNode.md) | All appropriate components with HMS type 'Node' given Target component/partition and query type. | [optional] [default to null] **Processors** | [**[]HwInvByLocProcessor**](HWInvByLocProcessor.md) | All appropriate components with HMS type 'Processor' given Target component/partition and query type. | [optional] [default to null] **NodeAccels** | [**[]HwInvByLocNodeAccel**](HWInvByLocNodeAccel.md) | All appropriate components with HMS type 'NodeAccel' given Target component/partition and query type. | [optional] [default to null] diff --git a/pkg/hsm-client/docs/RedfishEndpoint100RedfishEndpointDiscoveryInfo.md b/pkg/hsm-client/docs/RedfishEndpoint100RedfishEndpointDiscoveryInfo.md index 71e56822..be2d5326 100644 --- a/pkg/hsm-client/docs/RedfishEndpoint100RedfishEndpointDiscoveryInfo.md +++ b/pkg/hsm-client/docs/RedfishEndpoint100RedfishEndpointDiscoveryInfo.md @@ -3,8 +3,8 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**LastAttempt** | [**time.Time**](time.Time.md) | The time the last discovery attempt took place. | [optional] [default to null] -**LastStatus** | **string** | Describes the outcome of the last discovery attempt. | [optional] [default to null] +**LastDiscoveryAttempt** | [**time.Time**](time.Time.md) | The time the last discovery attempt took place. 
| [optional] [default to null] +**LastDiscoveryStatus** | **string** | Describes the outcome of the last discovery attempt. | [optional] [default to null] **RedfishVersion** | **string** | Version of Redfish as reported by the RF service root. | [optional] [default to null] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/pkg/hsm-client/docs/RedfishEndpointApi.md b/pkg/hsm-client/docs/RedfishEndpointApi.md index d3b34613..980d57ca 100644 --- a/pkg/hsm-client/docs/RedfishEndpointApi.md +++ b/pkg/hsm-client/docs/RedfishEndpointApi.md @@ -202,7 +202,7 @@ Name | Type | Description | Notes **uuid** | **optional.String**| Retrieve the RedfishEndpoint with the given UUID. | **macaddr** | **optional.String**| Retrieve the RedfishEndpoint with the given MAC address. | **ipaddress** | **optional.String**| Retrieve the RedfishEndpoint with the given IP address. A blank string will get Redfish endpoints without IP addresses. | - **laststatus** | **optional.String**| Retrieve the RedfishEndpoints with the given discovery status. This can be negated (i.e. !DiscoverOK). Valid values are: EndpointInvalid, EPResponseFailedDecode, HTTPsGetFailed, NotYetQueried, VerificationFailed, ChildVerificationFailed, DiscoverOK | + **lastdiscoverystatus** | **optional.String**| Retrieve the RedfishEndpoints with the given discovery status. This can be negated (i.e. !DiscoverOK). Valid values are: EndpointInvalid, EPResponseFailedDecode, HTTPsGetFailed, NotYetQueried, VerificationFailed, ChildVerificationFailed, DiscoverOK | ### Return type diff --git a/pkg/hsm-client/model_hms_type_1_0_0.go b/pkg/hsm-client/model_hms_type_1_0_0.go index fd6268bf..eb0f2c4a 100644 --- a/pkg/hsm-client/model_hms_type_1_0_0.go +++ b/pkg/hsm-client/model_hms_type_1_0_0.go @@ -31,6 +31,9 @@ const ( NODE_ENCLOSURE_HmsType100 HmsType100 = "NodeEnclosure" NODE_ENCLOSURE_POWER_SUPPLY_HmsType100 HmsType100 = "NodeEnclosurePowerSupply" HSN_BOARD_HmsType100 HmsType100 = "HSNBoard" + MGMT_SWITCH_HmsType100 HmsType100 = "MgmtSwitch" + MGMT_HL_SWITCH_HmsType100 HmsType100 = "MgmtHLSwitch" + CDU_MGMT_SWITCH_HmsType100 HmsType100 = "CDUMgmtSwitch" NODE_HmsType100 HmsType100 = "Node" PROCESSOR_HmsType100 HmsType100 = "Processor" DRIVE_HmsType100 HmsType100 = "Drive" diff --git a/pkg/hsm-client/model_hw_inv_by_fru_mgmt_hl_switch.go b/pkg/hsm-client/model_hw_inv_by_fru_mgmt_hl_switch.go new file mode 100644 index 00000000..7f16da39 --- /dev/null +++ b/pkg/hsm-client/model_hw_inv_by_fru_mgmt_hl_switch.go @@ -0,0 +1,20 @@ +/* + * Hardware State Manager API + * + * The Hardware State Manager (HSM) inventories, monitors, and manages hardware, and tracks the logical and dynamic component states, such as roles, NIDs, and other basic metadata needed to provide most common administrative and operational functions. HSM is the single source of truth for the state of the system. It contains the component state and information on Redfish endpoints for communicating with components via Redfish. It also allows administrators to create partitions and groups for other uses. ## Resources ### /State/Components HMS components are created during inventory discovery and provide a higher-level representation of the component, including state, NID, role (i.e. compute/service), subtype, and so on. 
 + *
 + * API version: 1.0.0
 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
 + */
 +package hsm_client
 +
 +// This is a subtype of HWInventoryByFRU for HMSType MgmtHLSwitch. It represents a high level management switch. It is selected via the 'discriminator: HWInventoryByFRUType' of HWInventoryByFRU when HWInventoryByFRUType is 'HWInvByFRUMgmtHLSwitch'.
 +type HwInvByFruMgmtHlSwitch struct {
 +	FRUID string `json:"FRUID,omitempty"`
 +	Type_ *HmsType100 `json:"Type,omitempty"`
 +	// TBD.
 +	FRUSubtype string `json:"FRUSubtype,omitempty"`
 +	// This is used as a discriminator to determine the additional HMS-type specific subtype that is returned.
 +	HWInventoryByFRUType string `json:"HWInventoryByFRUType"`
 +	MgmtHLSwitchFRUInfo *HwInventory100RedfishChassisFruInfo `json:"MgmtHLSwitchFRUInfo,omitempty"`
 +}
diff --git a/pkg/hsm-client/model_hw_inv_by_fru_mgmt_switch.go b/pkg/hsm-client/model_hw_inv_by_fru_mgmt_switch.go
new file mode 100644
index 00000000..9f824443
--- /dev/null
+++ b/pkg/hsm-client/model_hw_inv_by_fru_mgmt_switch.go
@@ -0,0 +1,20 @@
+/*
+ * Hardware State Manager API
+ *
+ * The Hardware State Manager (HSM) inventories, monitors, and manages hardware, and tracks the logical and dynamic component states, such as roles, NIDs, and other basic metadata needed to provide most common administrative and operational functions. HSM is the single source of truth for the state of the system.
+ *
+ * API version: 1.0.0
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+package hsm_client
+
+// This is a subtype of HWInventoryByFRU for HMSType MgmtSwitch. It represents a management switch. It is selected via the 'discriminator: HWInventoryByFRUType' of HWInventoryByFRU when HWInventoryByFRUType is 'HWInvByFRUMgmtSwitch'.
+type HwInvByFruMgmtSwitch struct {
+	FRUID string `json:"FRUID,omitempty"`
+	Type_ *HmsType100 `json:"Type,omitempty"`
+	// TBD.
+	FRUSubtype string `json:"FRUSubtype,omitempty"`
+	// This is used as a discriminator to determine the additional HMS-type specific subtype that is returned.
+	HWInventoryByFRUType string `json:"HWInventoryByFRUType"`
+	MgmtSwitchFRUInfo *HwInventory100RedfishChassisFruInfo `json:"MgmtSwitchFRUInfo,omitempty"`
+}
diff --git a/pkg/hsm-client/model_hw_inv_by_frucdu_mgmt_switch.go b/pkg/hsm-client/model_hw_inv_by_frucdu_mgmt_switch.go
new file mode 100644
index 00000000..3ab8a071
--- /dev/null
+++ b/pkg/hsm-client/model_hw_inv_by_frucdu_mgmt_switch.go
@@ -0,0 +1,20 @@
+/*
+ * Hardware State Manager API
+ *
+ * The Hardware State Manager (HSM) inventories, monitors, and manages hardware, and tracks the logical and dynamic component states, such as roles, NIDs, and other basic metadata needed to provide most common administrative and operational functions. HSM is the single source of truth for the state of the system.
+ *
+ * API version: 1.0.0
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+package hsm_client
+
+// This is a subtype of HWInventoryByFRU for HMSType CDUMgmtSwitch. It represents a CDU management switch.
+
+// This is a subtype of HWInventoryByFRU for HMSType CDUMgmtSwitch. It represents a CDU management switch. It is selected via the 'discriminator: HWInventoryByFRUType' of HWInventoryByFRU when HWInventoryByFRUType is 'HWInvByFRUCDUMgmtSwitch'.
+type HwInvByFrucduMgmtSwitch struct {
+	FRUID string `json:"FRUID,omitempty"`
+	Type_ *HmsType100 `json:"Type,omitempty"`
+	// TBD.
+	FRUSubtype string `json:"FRUSubtype,omitempty"`
+	// This is used as a discriminator to determine the additional HMS-type specific subtype that is returned.
+	HWInventoryByFRUType string `json:"HWInventoryByFRUType"`
+	CDUMgmtSwitchFRUInfo *HwInventory100RedfishChassisFruInfo `json:"CDUMgmtSwitchFRUInfo,omitempty"`
+}
diff --git a/pkg/hsm-client/model_hw_inv_by_loc_cdu_mgmt_switch.go b/pkg/hsm-client/model_hw_inv_by_loc_cdu_mgmt_switch.go
new file mode 100644
index 00000000..3840f878
--- /dev/null
+++ b/pkg/hsm-client/model_hw_inv_by_loc_cdu_mgmt_switch.go
@@ -0,0 +1,23 @@
+/*
+ * Hardware State Manager API
+ *
+ * (Full HSM API description omitted; see pkg/hsm-client/openapi.yaml below.)
+ *
+ * API version: 1.0.0
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+package hsm_client
+
+// This is a subtype of HWInventoryByLocation for HMSType CDUMgmtSwitch. It represents a CDU management switch. It is selected via the 'discriminator: HWInventoryByLocationType' of HWInventoryByLocation when HWInventoryByLocationType is 'HWInvByLocCDUMgmtSwitch'.
+type HwInvByLocCduMgmtSwitch struct {
+	ID string `json:"ID"`
+	Type_ *HmsType100 `json:"Type,omitempty"`
+	// This is the normalized (from zero) index of the component location (e.g. slot number) when there are more than one. This should match the last number in the xname in most cases (e.g. Ordinal 0 for node x0c0s0b0n0). Note that Redfish may use a different value or naming scheme, but this is passed through via the *LocationInfo for the type of component.
+	Ordinal int32 `json:"Ordinal,omitempty"`
+	// Populated or Empty - whether location is populated.
+	Status string `json:"Status,omitempty"`
+	// This is used as a discriminator to determine the additional HMS-type specific subtype that is returned.
+	HWInventoryByLocationType string `json:"HWInventoryByLocationType"`
+	PopulatedFRU *HwInventory100HwInventoryByFru `json:"PopulatedFRU,omitempty"`
+	CDUMgmtSwitchLocationInfo *HwInventory100RedfishChassisLocationInfo `json:"CDUMgmtSwitchLocationInfo,omitempty"`
+}
diff --git a/pkg/hsm-client/model_hw_inv_by_loc_mgmt_hl_switch.go b/pkg/hsm-client/model_hw_inv_by_loc_mgmt_hl_switch.go
new file mode 100644
index 00000000..c31e64c3
--- /dev/null
+++ b/pkg/hsm-client/model_hw_inv_by_loc_mgmt_hl_switch.go
@@ -0,0 +1,23 @@
+/*
+ * Hardware State Manager API
+ *
+ * (Full HSM API description omitted; see pkg/hsm-client/openapi.yaml below.)
+ *
+ * API version: 1.0.0
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+package hsm_client
+
+// This is a subtype of HWInventoryByLocation for HMSType MgmtHLSwitch. It represents a high level management switch. It is selected via the 'discriminator: HWInventoryByLocationType' of HWInventoryByLocation when HWInventoryByLocationType is 'HWInvByLocMgmtHLSwitch'.
+type HwInvByLocMgmtHlSwitch struct {
+	ID string `json:"ID"`
+	Type_ *HmsType100 `json:"Type,omitempty"`
+	// This is the normalized (from zero) index of the component location (e.g. slot number) when there are more than one. This should match the last number in the xname in most cases (e.g. Ordinal 0 for node x0c0s0b0n0). Note that Redfish may use a different value or naming scheme, but this is passed through via the *LocationInfo for the type of component.
+	Ordinal int32 `json:"Ordinal,omitempty"`
+	// Populated or Empty - whether location is populated.
+	Status string `json:"Status,omitempty"`
+	// This is used as a discriminator to determine the additional HMS-type specific subtype that is returned.
+	HWInventoryByLocationType string `json:"HWInventoryByLocationType"`
+	PopulatedFRU *HwInventory100HwInventoryByFru `json:"PopulatedFRU,omitempty"`
+	MgmtHLSwitchLocationInfo *HwInventory100RedfishChassisLocationInfo `json:"MgmtHLSwitchLocationInfo,omitempty"`
+}
diff --git a/pkg/hsm-client/model_hw_inv_by_loc_mgmt_switch.go b/pkg/hsm-client/model_hw_inv_by_loc_mgmt_switch.go
new file mode 100644
index 00000000..affb2af0
--- /dev/null
+++ b/pkg/hsm-client/model_hw_inv_by_loc_mgmt_switch.go
@@ -0,0 +1,23 @@
+/*
+ * Hardware State Manager API
+ *
+ * (Full HSM API description omitted; see pkg/hsm-client/openapi.yaml below.)
+ *
+ * API version: 1.0.0
+ * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
+ */
+package hsm_client
+
+// This is a subtype of HWInventoryByLocation for HMSType MgmtSwitch. It represents a management switch. It is selected via the 'discriminator: HWInventoryByLocationType' of HWInventoryByLocation when HWInventoryByLocationType is 'HWInvByLocMgmtSwitch'.
+type HwInvByLocMgmtSwitch struct {
+	ID string `json:"ID"`
+	Type_ *HmsType100 `json:"Type,omitempty"`
+	// This is the normalized (from zero) index of the component location (e.g. slot number) when there are more than one. This should match the last number in the xname in most cases (e.g. Ordinal 0 for node x0c0s0b0n0). Note that Redfish may use a different value or naming scheme, but this is passed through via the *LocationInfo for the type of component.
+	Ordinal int32 `json:"Ordinal,omitempty"`
+	// Populated or Empty - whether location is populated.
+	Status string `json:"Status,omitempty"`
+	// This is used as a discriminator to determine the additional HMS-type specific subtype that is returned.
+	HWInventoryByLocationType string `json:"HWInventoryByLocationType"`
+	PopulatedFRU *HwInventory100HwInventoryByFru `json:"PopulatedFRU,omitempty"`
+	MgmtSwitchLocationInfo *HwInventory100RedfishChassisLocationInfo `json:"MgmtSwitchLocationInfo,omitempty"`
+}
diff --git a/pkg/hsm-client/model_hw_inventory_1_0_0_hw_inventory.go b/pkg/hsm-client/model_hw_inventory_1_0_0_hw_inventory.go
index edb6b308..df313bbc 100644
--- a/pkg/hsm-client/model_hw_inventory_1_0_0_hw_inventory.go
+++ b/pkg/hsm-client/model_hw_inventory_1_0_0_hw_inventory.go
@@ -25,6 +25,12 @@ type HwInventory100HwInventory struct {
 	NodeEnclosures []HwInvByLocNodeEnclosure `json:"NodeEnclosures,omitempty"`
 	// All appropriate components with HMS type 'HSNBoard' given Target component/partition and query type.
 	HSNBoards []HwInvByLocHsnBoard `json:"HSNBoards,omitempty"`
+	// All appropriate components with HMS type 'MgmtSwitch' given Target component/partition and query type.
+	MgmtSwitches []HwInvByLocMgmtSwitch `json:"MgmtSwitches,omitempty"`
+	// All appropriate components with HMS type 'MgmtHLSwitch' given Target component/partition and query type.
+	MgmtHLSwitches []HwInvByLocMgmtHlSwitch `json:"MgmtHLSwitches,omitempty"`
+	// All appropriate components with HMS type 'CDUMgmtSwitch' given Target component/partition and query type.
+	CDUMgmtSwitches []HwInvByLocCduMgmtSwitch `json:"CDUMgmtSwitches,omitempty"`
 	// All appropriate components with HMS type 'Node' given Target component/partition and query type.
 	Nodes []HwInvByLocNode `json:"Nodes,omitempty"`
 	// All appropriate components with HMS type 'Processor' given Target component/partition and query type.
diff --git a/pkg/hsm-client/model_hw_inventory_1_0_0_hw_inventory_by_fru.go b/pkg/hsm-client/model_hw_inventory_1_0_0_hw_inventory_by_fru.go
index 9c09d3da..7a9c4f7a 100644
--- a/pkg/hsm-client/model_hw_inventory_1_0_0_hw_inventory_by_fru.go
+++ b/pkg/hsm-client/model_hw_inventory_1_0_0_hw_inventory_by_fru.go
@@ -16,4 +16,29 @@ type HwInventory100HwInventoryByFru struct {
 	FRUSubtype string `json:"FRUSubtype,omitempty"`
 	// This is used as a discriminator to determine the additional HMS-type specific subtype that is returned.
 	HWInventoryByFRUType string `json:"HWInventoryByFRUType"`
+
+	// Each field below encodes: the HMS type, its underlying Redfish type, and how it is named in the JSON object.
+	HMSCabinetFRUInfo *HwInventory100RedfishChassisFruInfo `json:"CabinetFRUInfo,omitempty"`
+	HMSChassisFRUInfo *HwInventory100RedfishChassisFruInfo `json:"ChassisFRUInfo,omitempty"` // Mountain chassis
+	HMSComputeModuleFRUInfo *HwInventory100RedfishChassisFruInfo `json:"ComputeModuleFRUInfo,omitempty"`
+	HMSRouterModuleFRUInfo *HwInventory100RedfishChassisFruInfo `json:"RouterModuleFRUInfo,omitempty"`
+	HMSNodeEnclosureFRUInfo *HwInventory100RedfishChassisFruInfo `json:"NodeEnclosureFRUInfo,omitempty"`
+	HMSHSNBoardFRUInfo *HwInventory100RedfishChassisFruInfo `json:"HSNBoardFRUInfo,omitempty"`
+	HMSMgmtSwitchFRUInfo *HwInventory100RedfishChassisFruInfo `json:"MgmtSwitchFRUInfo,omitempty"`
+	HMSMgmtHLSwitchFRUInfo *HwInventory100RedfishChassisFruInfo `json:"MgmtHLSwitchFRUInfo,omitempty"`
+	HMSCDUMgmtSwitchFRUInfo *HwInventory100RedfishChassisFruInfo `json:"CDUMgmtSwitchFRUInfo,omitempty"`
+	HMSNodeFRUInfo *HwInventory100RedfishSystemFruInfo `json:"NodeFRUInfo,omitempty"`
+	HMSProcessorFRUInfo *HwInventory100RedfishProcessorFruInfo `json:"ProcessorFRUInfo,omitempty"`
+	HMSNodeAccelFRUInfo *HwInventory100RedfishProcessorFruInfo `json:"NodeAccelFRUInfo,omitempty"`
+	HMSMemoryFRUInfo *HwInventory100RedfishMemoryFruInfo `json:"MemoryFRUInfo,omitempty"`
+	HMSDriveFRUInfo *HwInventory100RedfishDriveFruInfo `json:"DriveFRUInfo,omitempty"`
+	// HMSHSNNICFRUInfo *HwInventory100Redfish `json:"NodeHsnNicFRUInfo,omitempty"`
+
+	HMSPDUFRUInfo *HwInventory100RedfishPdufruInfo `json:"PDUFRUInfo,omitempty"`
+	HMSOutletFRUInfo *HwInventory100RedfishOutletFruInfo `json:"OutletFRUInfo,omitempty"`
+	HMSCMMRectifierFRUInfo *HwInventory100RedfishCmmRectifierFruInfo `json:"CMMRectifierFRUInfo,omitempty"`
+	HMSNodeEnclosurePowerSupplyFRUInfo *HwInventory100RedfishNodeEnclosurePowerSupplyFruInfo `json:"NodeEnclosurePowerSupplyFRUInfo,omitempty"`
+	HMSNodeBMCFRUInfo *HwInventory100RedfishManagerFruInfo `json:"NodeBMCFRUInfo,omitempty"`
+	HMSRouterBMCFRUInfo *HwInventory100RedfishManagerFruInfo `json:"RouterBMCFRUInfo,omitempty"`
+	HMSNodeAccelRiserFRUInfo *HwInventory100RedfishNodeAccelRiserFruInfo `json:"NodeAccelRiserFRUInfo,omitempty"`
 }
diff --git a/pkg/hsm-client/model_redfish_endpoint_1_0_0_redfish_endpoint_discovery_info.go b/pkg/hsm-client/model_redfish_endpoint_1_0_0_redfish_endpoint_discovery_info.go
index 4e170cbb..4f637e3c 100644
--- a/pkg/hsm-client/model_redfish_endpoint_1_0_0_redfish_endpoint_discovery_info.go
+++ b/pkg/hsm-client/model_redfish_endpoint_1_0_0_redfish_endpoint_discovery_info.go
@@ -12,16 +12,6 @@ import (
 	"time"
 )
 
-// // Contains info about the discovery status of the given endpoint.
-// type RedfishEndpoint100RedfishEndpointDiscoveryInfo struct {
-// 	// The time the last discovery attempt took place.
-// 	LastAttempt time.Time `json:"LastAttempt,omitempty"`
-// 	// Describes the outcome of the last discovery attempt.
-// 	LastStatus string `json:"LastStatus,omitempty"`
-// 	// Version of Redfish as reported by the RF service root.
-// 	RedfishVersion string `json:"RedfishVersion,omitempty"`
-// }
-
 // Contains info about the discovery status of the given endpoint.
 type RedfishEndpoint100RedfishEndpointDiscoveryInfo struct {
 	// The time the last discovery attempt took place.
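Taken together, the changes above make the three switch types traversable end to end: HwInventory100HwInventory gains the MgmtSwitches/MgmtHLSwitches/CDUMgmtSwitches collections, and HwInventory100HwInventoryByFru gains the per-type *FRUInfo fields selected by the HWInventoryByFRUType discriminator. A minimal consumption sketch, not part of this change set (the import path is assumed from this repo's layout; only types and fields introduced above are used):

```go
package example

import (
	"fmt"

	hsm_client "github.com/Cray-HPE/cani/pkg/hsm-client" // assumed import path
)

// printCDUMgmtSwitches walks the new CDUMgmtSwitches collection and uses the
// HWInventoryByFRUType discriminator to pick the matching *FRUInfo field.
func printCDUMgmtSwitches(inv hsm_client.HwInventory100HwInventory) {
	for _, sw := range inv.CDUMgmtSwitches {
		fmt.Printf("%s status=%s\n", sw.ID, sw.Status)

		fru := sw.PopulatedFRU
		if fru == nil {
			continue // location reported as Empty; nothing populated here
		}
		// Per the model comments, 'HWInvByFRUCDUMgmtSwitch' selects the CDU
		// management switch subtype, so HMSCDUMgmtSwitchFRUInfo is the field to read.
		if fru.HWInventoryByFRUType == "HWInvByFRUCDUMgmtSwitch" && fru.HMSCDUMgmtSwitchFRUInfo != nil {
			fmt.Printf("  chassis FRU info present for %s\n", sw.ID)
		}
	}
}
```

The same pattern applies to MgmtSwitches and MgmtHLSwitches with their respective discriminator values and *LocationInfo/*FRUInfo fields.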
diff --git a/pkg/hsm-client/openapi.yaml b/pkg/hsm-client/openapi.yaml
new file mode 100644
index 00000000..f7cace43
--- /dev/null
+++ b/pkg/hsm-client/openapi.yaml
@@ -0,0 +1,12248 @@
+---
+swagger: '2.0'
+info:
+  description: >-
+    The Hardware State Manager (HSM) inventories, monitors, and manages
+    hardware, and
+    tracks the logical and dynamic component states, such as roles,
+    NIDs, and other basic metadata needed to provide
+    most common administrative and operational functions. HSM is the
+    single source of truth for the state of the system.
+    It contains the component state and information on Redfish endpoints for
+    communicating with components via Redfish.
+    It also allows administrators to create partitions and groups for other uses.
+
+    ## Resources
+
+    ### /State/Components
+
+    HMS components are created during inventory discovery and provide a higher-level
+    representation of the component, including state, NID, role (i.e. compute/service),
+    subtype, and so on. Unlike ComponentEndpoints,
+    however, they are not strictly linked to the parent RedfishEndpoint, and are not
+    automatically deleted when the RedfishEndpoints are (though they can be deleted via a
+    separate call). This is because these components can also represent abstract components,
+    such as removed components (e.g. which would remain, but have their states changed to
+    "Empty" upon removal).
+
+    ### /Defaults/NodeMaps
+
+
+    This resource allows a mapping file (NodeMaps) to be uploaded that maps node xnames to
+    Node IDs, and optionally, to roles and subroles. These mappings are used
+    when discovering nodes for the first time. These mappings should be uploaded prior to
+    discovery and should contain mappings for each valid node xname in the system, whether populated or
+    not. Nodemap is a JSON file that contains the xname of the node, node ID, and optionally
+    role and subrole. Role can be Compute, Application, Storage, Management etc.
+    The NodeMaps collection can be uploaded to HSM automatically at install time by specifying
+    it as a JSON file. As a result, the endpoints are then automatically discovered by REDS,
+    and inventory discovery is performed by HSM. The desired NID numbers will be set as
+    soon as the nodes are created using the NodeMaps collection.
+
+
+    It is recommended that Nodemaps are uploaded at install time before discovery happens.
+    If they are uploaded after discovery, then the node xnames need
+    to be manually updated with the correct NIDs. You can update NIDs for individual
+    components by using PATCH /State/Components/{xname}/NID.
+
+
+    ### /Inventory/Hardware
+
+
+    This resource shows the hardware inventory of the entire system and contains
+    FRU information in location. All entries are displayed as a flat array.
+
+    ### /Inventory/HardwareByFRU
+
+
+    Every component has FRU information. This resource shows the hardware inventory for
+    all FRUs or for a specific FRU irrespective of the location. This information is constant
+    regardless of where the hardware item is currently in the system.
+    If a HWInventoryByLocation entry is currently populated with a piece of hardware, it will
+    have the corresponding HWInventoryByFRU object embedded. This FRU info can also be
+    looked up by FRU ID regardless of the current location.
+
+    ### /Inventory/Hardware/Query/{xname}
+
+
+    This resource gets you information about a specific component and its sub-components.
+    The xname can be a component, partition, ALL, or s0. Both ALL and s0 represent the
+    entire system.
+
+    ### /Inventory/RedfishEndpoints
+
+
+    This is a BMC or other Redfish controller that has a Redfish entry
+    point and Redfish service root. It is used to discover the components
+    managed by this endpoint during discovery and handles all Redfish
+    interactions by these subcomponents. If the endpoint has been discovered,
+    this entry will include the ComponentEndpoint entries for these managed
+    subcomponents.
+    You can also create a Redfish Endpoint or update the definition for a Redfish Endpoint.
+    The xname identifies the location of all components in the system, including chassis,
+    controllers, nodes, and so on. Redfish endpoints are given to State Manager.
+
+    ### /Inventory/ComponentEndpoints
+
+
+    Component Endpoints are the specific URLs for each individual component
+    that are under the Redfish endpoint.
+    Component endpoints are discovered during inventory discovery. They are the
+    management-plane representation of system components and are linked to the parent
+    Redfish Endpoint. They provide a glue layer to bridge the higher-level representation
+    of a component with how it is represented locally by Redfish.
+
+
+    The collection of ComponentEndpoints can be obtained in full, optionally filtered on
+    certain criteria (e.g. obtain just Node components), or accessed by their xname IDs
+    individually.
+
+    ### /Inventory/ServiceEndpoints
+
+
+    ServiceEndpoints help you do things on Redfish like updating the firmware.
+    They are discovered during inventory discovery.
+
+    ### /groups
+
+
+    Groups are named sets of system components, most commonly nodes. A group groups
+    components under an administratively
+    chosen label (group name). Each component may belong to any number of groups.
+    If a group has exclusiveGroup= set, then a node
+    may only be a member of one group that matches that exclusive label. For example, if
+    the exclusive group label 'colors' is associated with groups 'blue', 'red', and 'green',
+    then a component that is part of 'green' could not also be placed in 'red'.
+
+    You can create, modify, or delete a group and its members. You can also use group names
+    as filters for API calls.
+
+    ### /partitions
+
+
+    A partition is a formal, non-overlapping division of the system that forms an administratively
+    distinct sub-system. Each component may belong to at most one partition. Partitions
+    are used as an access control mechanism or for implementing multi-tenancy. You can create,
+    modify, or delete a partition and its members. You can also use partitions as filters
+    for other API calls.
+
+    ### /memberships
+
+
+    A membership shows the association of a component xname to its set of group
+    labels and partition names. There can be many group labels and up to
+    one partition per component.
+    Memberships are not modified directly, as the underlying group or partition is modified instead.
+    A component can be removed from one of the listed groups or partitions or added via POST
+    as well as being present in the initial set of members
+    when a partition or group is created. You can retrieve the memberships for components
+    or memberships for a specific xname.
+
+    ### /Inventory/DiscoveryStatus
+
+
+    Check discovery status for all components or you can track the status for a specific job
+    ID. You can also check per-endpoint discovery status for each RedfishEndpoint. Contains
+    status information about the discovery operation for clients to query. The discover
+    operation returns a link or links to status objects so that a client can determine when
+    the discovery operation is complete.
+
+    ### /Inventory/Discover
+
+
+    Discover subcomponents by querying all RedfishEndpoints. Once the RedfishEndpoint
+    objects are created, inventory discovery will query
+    these controllers and create or update management plane and managed plane objects representing
+    the components (e.g. nodes, node enclosures, node cards for Mountain chassis CMM endpoints).
+
+    ### /Subscriptions/SCN
+
+
+    Manage subscriptions to state change notifications (SCNs) from HSM. You can also
+    subscribe to state change notifications by using the HMS Notification Fanout Daemon API.
+
+    ## Workflows
+
+
+    ### Add and Delete a Redfish Endpoint
+
+    #### POST /Inventory/RedfishEndpoints
+
+    When you manually create Redfish endpoints, the discovery is automatically initiated.
+    You would create Redfish endpoints for components that are not automatically
+    discovered by REDS or MEDS.
+
+    #### GET /Inventory/RedfishEndpoints
+
+    Check the Redfish endpoints that have been added and check the status of discovery.
+
+    #### DELETE /Inventory/RedfishEndpoints/{xname}
+
+    Delete a specific Redfish endpoint.
+
+    ### Perform Inventory Discovery
+
+    #### POST /Inventory/Discover
+
+    Start inventory discovery of a system's subcomponents by querying all Redfish endpoints.
+    If needed, specify an ID or hostname (xname) in the payload.
+
+    #### GET /Inventory/DiscoveryStatus
+
+    Check the discovery status of all Redfish endpoints. You can also check the discovery
+    status for each individual component by providing its ID.
+
+    ### Query and Update HMS Components (State/NID)
+
+    #### GET /State/Components
+
+    Retrieve all HMS Components found by inventory discovery as a named ("Components") array.
+
+
+    #### PATCH /State/Components/{xname}/Enabled
+
+    Modify the component's Enabled field.
+
+
+    #### DELETE /State/Components/{xname}
+
+    Delete a specific HMS component by providing its xname.
+    As noted, components are not automatically deleted when RedfishEndpoints or ComponentEndpoints
+    are deleted.
+
+    ### Create and Delete a New Group
+
+    #### GET /hsm/v2/State/Components
+
+    Retrieve a list of desired components and their state. Select the nodes that you want
+    to group.
+
+
+    #### POST /groups
+
+    Create the new group with desired members. Provide a group label (required), description,
+    name, members etc. in the JSON payload.
+
+    #### GET /groups/{group_label}
+
+    Retrieve the group that was created with the label.
+
+    #### GET /State/Components/{group_label}
+
+    Retrieve the current state for all the components in the group.
+
+    #### DELETE /groups/{group_label}
+
+    Delete the group specified by {group_label}.
+
+    ## Valid State Transitions
+
+    ```
+
+    Prior State -> New State - Reason
+
+    Ready -> Standby - HBTD if node has many missed heartbeats
+
+    Ready -> Ready/Warning - HBTD if node has a few missed heartbeats
+
+    Standby -> Ready - HBTD Node re-starts heartbeating
+
+    On -> Ready - HBTD Node started heartbeating
+
+    Off -> Ready - HBTD sees heartbeats before Redfish Event (On)
+
+    Standby -> On - Redfish Event (On) or if re-discovered while in the standby state
+
+    Off -> On - Redfish Event (On)
+
+    Standby -> Off - Redfish Event (Off)
+
+    Ready -> Off - Redfish Event (Off)
+
+    On -> Off - Redfish Event (Off)
+
+    Any State -> Empty - Redfish Endpoint is disabled meaning component removal
+
+    ```
+
+    Generally, nodes transition 'Off' -> 'On' -> 'Ready' when going from 'Off' to booted, and 'Ready' -> 'Ready/Warning' -> 'Standby' -> 'Off' when shutdown.
+
+
+  version: 1.0.0
+  title: Hardware State Manager API
+host: 'sms'
+basePath: /apis/smd/hsm/v2
+schemes:
+  - https
+produces:
+  - application/json
+  - application/problem+json
+consumes:
+  - application/json
+tags:
+  - name: Service Info
+    description: >-
+      Service information APIs for getting information on the HSM service such
+      as readiness, etc.
+  - name: Component
+    description: >-
+      High-level component information by xname: state, flag, NID, role, etc.
+  - name: NodeMap
+    description: >-
+      Given a node xname ID, provide defaults for NID, Role, etc. to be used
+      when the node is first discovered. These are uploaded prior to
+      discovery and should contain mappings for each valid node xname in
+      the system, whether populated or not.
+  - name: HWInventory
+    description: >-
+      HWInventoryByLocation collection containing all components matching
+      the query that was submitted.
+  - name: HWInventoryByLocation
+    description: >-
+      Hardware inventory information for the given system location/xname
+  - name: HWInventoryByFRU
+    description: >-
+      This represents a physical piece of hardware with properties specific
+      to a unique component in the system. This information
+      is constant regardless of where the hardware item is currently in
+      the system (if it is in the system). If a HWInventoryByLocation
+      entry is currently populated with a piece of hardware, it will have
+      the corresponding HWInventoryByFRU object embedded. This FRU info can
+      also be looked up by FRU ID regardless of the current location.
+  - name: HWInventoryHistory
+    description: >-
+      Hardware inventory historical information for the given system location/xname/FRU
+  - name: RedfishEndpoint
+    description: >-
+      This is a BMC or other Redfish controller that has a Redfish entry
+      point and Redfish service root. It is used to discover the components
+      managed by this endpoint during discovery and handles all Redfish
+      interactions by these subcomponents. If the endpoint has been discovered,
+      this entry will include the ComponentEndpoint entries for these managed
+      subcomponents.
+  - name: ComponentEndpoint
+    description: >-
+      The Redfish-discovered properties for a component discovered through,
+      and managed by a RedfishEndpoint, such as a node, blade, and so on.
+      These are obtainable via a discovered RedfishEndpoint or can be
+      looked up by their xnames separately so that just the information
+      for a particular component, e.g. node can be retrieved. They can
+      also provide a back-reference to the parent endpoint.
+ - name: ServiceEndpoint + description: >- + The Redfish-discovered properties for a service discovered through, + and managed by a RedfishEndpoint, such as UpdateService, EventService, + and so on. These are obtainable via a discovered RedfishEndpoint or can + be looked up by their service type and xnames separately so that just the + information for a particular service, e.g. UpdateService can be retrieved. + They can also provide a back-reference to the parent endpoint. + - name: ComponentEthernetInterfaces + description: >- + The MAC address to IP address relation for components in the system. If + the component has been discovered by HSM, the xname of the component that + has the Ethernet interface will be associated with it as well. + - name: Group + description: >- + A group is an informal, possibly overlapping division of the system that + groups Components (most frequently nodes) under an administratively + chosen label (i.e. group name). Unlike partitions, components can be + members of any number of groups. + - name: Partition + description: >- + A partition is a formal, non-overlapping division of the system that + forms an administratively distinct sub-system e.g. for implementing + multi-tenancy. + - name: Membership + description: >- + A membership is a mapping of a component xname to its set of group + labels and partition names. + # - name: SystemInformationBlock + # description: >- + # The SystemInformationBlock (SIB) object nests Component, HWInventory, + # HSNType, and HSNInfo subschemas that match a particular query, + # associating information that may be needed by a + # large number of endpoints into a single, + # self-contained object. While most of the individual subschemas, + # (HSNInfo is the exception, which is obtained from the fabric manager) + # can be queried using several of the more general APIs, the + # SIB supports the use of a single API that clients may use to + # bootstrap their initial system state, for example. + - name: DiscoveryStatus + description: >- + Contains status information about the discovery operation for clients + to query. The discover operation returns a link or links to + status objects so that a client can determine when the discovery operation + is complete. + - name: Discover + description: >- + Trigger a discovery of system component data + by interrogating all, or a subset, of the RedfishEndpoints currently + known to the system. + - name: SCN + description: >- + Manage subscriptions to state change notifications (SCNs) from HSM. + - name: Locking + description: >- + Manage locks and reservations on components. + - name: PowerMap + description: >- + Power mapping of components to the components supplying them power. This + may contain components in the system whether populated or not. +paths: + ######################################################################## + # + # Service Information API calls - Ready, etc. + # + ######################################################################## + /service/ready: + get: + tags: + - Service Info + summary: Kubernetes readiness endpoint to monitor service health + x-private: true + description: >- + The `readiness` resource works in conjunction with the Kubernetes readiness + probe to determine when the service is no longer healthy and able to respond + correctly to requests. Too many failures of the readiness probe will result + in the traffic being routed away from this service and eventually the service + will be shut down and restarted if in an unready state for too long. 
+ + + This is primarily an endpoint for the automated Kubernetes system. + operationId: doReadyGet + responses: + "200": + description: >- + [OK](http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.2.1) + Network API call success + schema: + $ref: '#/definitions/Response_1.0.0' + "503": + description: >- + The service is unhealthy and not ready + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + + /service/liveness: + get: + tags: + - Service Info + summary: Kubernetes liveness endpoint to monitor service health + x-private: true + description: >- + The `liveness` resource works in conjunction with the Kubernetes liveness + probe to determine when the service is no longer responding to + requests. Too many failures of the liveness probe will result in the + service being shut down and restarted. + + + This is primarily an endpoint for the automated Kubernetes system. + operationId: doLivenessGet + responses: + "204": + description: >- + [No Content](http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.2.5) + Network API call success + "503": + description: >- + The service is not taking HTTP requests + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /service/values: + get: + tags: + - Service Info + summary: Retrieve all valid values for use as parameters + description: >- + Retrieve all valid values for use as parameters. + operationId: doValuesGet + responses: + "200": + description: An array of parameters and their valid values. + schema: + $ref: '#/definitions/Values.1.0.0_Values' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /service/values/arch: + get: + tags: + - Service Info + summary: Retrieve all valid values for use with the 'arch' parameter + description: >- + Retrieve all valid values for use with the 'arch' (component architecture) parameter. + operationId: doArchValuesGet + responses: + "200": + description: An array of valid values for the 'arch' parameter. + schema: + $ref: '#/definitions/Values.1.0.0_ArchArray' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /service/values/class: + get: + tags: + - Service Info + summary: Retrieve all valid values for use with the 'class' parameter + description: >- + Retrieve all valid values for use with the 'class' (hardware class) parameter. + operationId: doClassValuesGet + responses: + "200": + description: An array of valid values for the 'class' parameter. + schema: + $ref: '#/definitions/Values.1.0.0_ClassArray' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /service/values/flag: + get: + tags: + - Service Info + summary: Retrieve all valid values for use with the 'flag' parameter + description: >- + Retrieve all valid values for use with the 'flag' (component flag) parameter. + operationId: doFlagValuesGet + responses: + "200": + description: An array of valid values for the 'flag' parameter. + schema: + $ref: '#/definitions/Values.1.0.0_FlagArray' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /service/values/nettype: + get: + tags: + - Service Info + summary: Retrieve all valid values for use with the 'nettype' parameter + description: >- + Retrieve all valid values for use with the 'nettype' (component network type) parameter. 
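+ Typical values include, for example, `Sling`, `Ethernet`, and `Infiniband`.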
+ operationId: doNetTypeValuesGet + responses: + "200": + description: An array of valid values for the 'nettype' parameter. + schema: + $ref: '#/definitions/Values.1.0.0_NetTypeArray' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /service/values/role: + get: + tags: + - Service Info + summary: Retrieve all valid values for use with the 'role' parameter + description: >- + Retrieve all valid values for use with the 'role' (component role) parameter. + operationId: doRoleValuesGet + responses: + "200": + description: An array of valid values for the 'role' parameter. + schema: + $ref: '#/definitions/Values.1.0.0_RoleArray' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /service/values/subrole: + get: + tags: + - Service Info + summary: Retrieve all valid values for use with the 'subrole' parameter + description: >- + Retrieve all valid values for use with the 'subrole' (component subrole) parameter. + operationId: doSubRoleValuesGet + responses: + "200": + description: An array of valid values for the 'subrole' parameter. + schema: + $ref: '#/definitions/Values.1.0.0_SubRoleArray' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /service/values/state: + get: + tags: + - Service Info + summary: Retrieve all valid values for use with the 'state' parameter + description: >- + Retrieve all valid values for use with the 'state' (component state) parameter. + operationId: doStateValuesGet + responses: + "200": + description: An array of valid values for the 'state' parameter. + schema: + $ref: '#/definitions/Values.1.0.0_StateArray' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /service/values/type: + get: + tags: + - Service Info + summary: Retrieve all valid values for use with the 'type' parameter + description: >- + Retrieve all valid values for use with the 'type' (component HMSType) parameter. + operationId: doTypeValuesGet + responses: + "200": + description: An array of valid values for the 'type' parameter. + schema: + $ref: '#/definitions/Values.1.0.0_TypeArray' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + ######################################################################## + # + # State Component API Calls + # + ######################################################################## + /State/Components: + get: + tags: + - Component + summary: Retrieve collection of HMS Components + description: >- + Retrieve the full collection of state/components in the form of a + ComponentArray. Full results can also be filtered by query + parameters. When multiple parameters are specified, they are applied + in an AND fashion (e.g. type AND state). When a parameter is specified + multiple times, they are applied in an OR fashion (e.g. type AND (state1 OR state2)). + If the collection is empty or the filters have no match, an + empty array is returned.
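+ For example, the illustrative query `?type=Node&state=Ready&state=Off` + matches all nodes that are either Ready or Off.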
+ operationId: doComponentsGet + parameters: + - $ref: '#/parameters/compIDParam' + - $ref: '#/parameters/compTypeParam' + - $ref: '#/parameters/compStateParam' + - $ref: '#/parameters/compFlagParam' + - $ref: '#/parameters/compRoleParam' + - $ref: '#/parameters/compSubroleParam' + - $ref: '#/parameters/compEnabledParam' + - $ref: '#/parameters/compSoftwareStatusParam' + - $ref: '#/parameters/compSubtypeParam' + - $ref: '#/parameters/compArchParam' + - $ref: '#/parameters/compClassParam' + - $ref: '#/parameters/compNIDParam' + - $ref: '#/parameters/compNIDStartParam' + - $ref: '#/parameters/compNIDEndParam' + - $ref: '#/parameters/compPartitionParam' + - $ref: '#/parameters/compGroupParam' + - name: stateonly + in: query + type: boolean + description: >- + Return only component state and flag fields (plus xname/ID and + type). Results can be modified and used for bulk state/flag-only + patch operations. + - name: flagonly + in: query + type: boolean + description: >- + Return only component flag field (plus xname/ID and type). + Results can be modified and used for bulk flag-only patch + operations. + - name: roleonly + in: query + type: boolean + description: >- + Return only component role and subrole fields (plus xname/ID and type). + Results can be modified and used for bulk role-only patches. + - name: nidonly + in: query + type: boolean + description: >- + Return only component NID field (plus xname/ID and type). + Results can be modified and used for bulk NID-only patches. + responses: + "200": + description: >- + ComponentArray representing results of query. + schema: + $ref: '#/definitions/ComponentArray_ComponentArray' + "400": + description: Bad Request such as invalid argument for filter + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + post: + tags: + - Component + summary: Create/Update a collection of HMS Components + description: >- + Create/Update a collection of state/components. If the component + already exists, it will not be overwritten unless force=true, in which + case State, Flag, Subtype, NetType, Arch, and Class will get overwritten. + operationId: doComponentsPost + parameters: + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/ComponentArray_PostArray' + responses: + "204": + description: >- + [No Content](http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.2.5) + One or more Component entries were successfully created/updated. + "400": + description: Bad Request such as invalid argument for a component field + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - Component + - cli_danger$This will delete all components in HSM, continue? + summary: >- + Delete all components + description: >- + Delete all entries in the components collection. + operationId: doComponentsDeleteAll + responses: + "200": + description: >- + Zero (success) error code - one or more entries deleted. + Message contains count of deleted items.
+ schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist - Collection is empty + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + # /State/Components/ByNID: + # get: + # tags: + # - Component + # summary: GET Component-by-NID Resource IDs Collection + # description: >- + # This produces a collection of URIs to all Components using their + # ByNID path. + # operationId: doComponentResourceURIByNIDGet + # parameters: + # - name: partition + # in: query + # type: string + # description: >- + # Restrict search to the given partition. + # responses: + # "200": + # description: Collection of Component Resource IDs. + # schema: + # $ref: '#/definitions/ComponentByNID.1.0.0_ResourceURICollection' + # examples: + # application/json: + # Name: State Component by NID Collection + # Members: + # - URI: /hsm/v2/State/Components/ByNID/0 + # - URI: /hsm/v2/State/Components/ByNID/1 + # MemberCount: 2 + # "400": + # description: Bad Request + # schema: + # $ref: '#/definitions/Problem7807' + # "404": + # description: Does Not Exist + # schema: + # $ref: '#/definitions/Problem7807' + # default: + # description: Unexpected error + # schema: + # $ref: '#/definitions/Problem7807' + /State/Components/{xname}: + get: + tags: + - Component + summary: Retrieve component at {xname} + description: >- + Retrieve state or components by xname. + operationId: doComponentGet + parameters: + - name: xname + in: path + type: string + description: Locational xname of component to return. + required: true + responses: + "200": + description: Component entry matching xname/ID + schema: + $ref: '#/definitions/Component.1.0.0_Component' + "400": + description: Bad Request or invalid xname + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + put: + tags: + - Component + summary: Create/Update an HMS Component + description: >- + Create/Update a state/component. If the component already exists it + will not be overwritten unless force=true in which case State, Flag, + Subtype, NetType, Arch, and Class will get overwritten. + operationId: doComponentPut + parameters: + - name: xname + in: path + type: string + description: >- + Locational xname of the component to create or update. + required: true + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/Component.1.0.0_Put' + responses: + "204": + description: >- + [No Content](http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.2.5) + Component entry was successfully created/updated. + "400": + description: Bad Request such as invalid argument for a component field + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - Component + summary: Delete component with ID {xname} + description: >- + Delete a component by xname. + operationId: doComponentDelete + parameters: + - name: xname + in: path + type: string + description: Locational xname of component record to delete. + required: true + responses: + "200": + description: Component is deleted. 
+ schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: XName does Not Exist - no matching ID to delete + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /State/Components/ByNID/{nid}: + get: + tags: + - Component + summary: Retrieve component with NID={nid} + description: >- + Retrieve a component by NID. + operationId: doComponentByNIDGet + parameters: + - name: nid + in: path + type: string + description: NID of component to return. + required: true + responses: + "200": + description: Component entry matching xname/ID + schema: + $ref: '#/definitions/Component.1.0.0_Component' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /State/Components/BulkStateData: + patch: + tags: + - Component + summary: >- + Update multiple components' state data via a list of xnames + description: >- + Specify a list of xnames to update the State and Flag fields. If the Flag field is omitted, + Flag is reverted to 'OK'. Other fields are ignored. The list of IDs + and the new State are required. + operationId: doCompBulkStateDataPatch + parameters: + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/ComponentArray_PatchArray.StateData' + responses: + "204": + description: Success. + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /State/Components/{xname}/StateData: + patch: + tags: + - Component + summary: >- + Update component state data at {xname} + description: >- + Update the component's state and flag fields only. If Flag field is + omitted, the Flag value is reverted to 'OK'. + operationId: doCompStatePatch + parameters: + - name: xname + in: path + type: string + description: Locational xname of component to set state/flag on. + required: true + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/Component.1.0.0_Patch.StateData' + responses: + "204": + description: Success. + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /State/Components/BulkFlagOnly: + patch: + tags: + - Component + summary: >- + Update multiple components' Flag values via a list of xnames + description: >- + Specify a list of xnames to update the Flag field and specify the value. + The list of IDs and the new Flag are required. + operationId: doCompBulkFlagOnlyPatch + parameters: + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/ComponentArray_PatchArray.FlagOnly' + responses: + "204": + description: Success. 
+ "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /State/Components/{xname}/FlagOnly: + patch: + tags: + - Component + summary: >- + Update component Flag value at {xname} + description: The State is not modified. Only the Flag is updated. + operationId: doCompFlagOnlyPatch + parameters: + - name: xname + in: path + type: string + description: Locational xname of component to modify flag on. + required: true + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/Component.1.0.0_Patch.FlagOnly' + responses: + "204": + description: Success. + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /State/Components/BulkEnabled: + patch: + tags: + - Component + summary: >- + Update multiple components' Enabled values via a list of xnames + description: >- + Update the Enabled field for a list of xnames. Specify a single + value for Enabled and also the list of xnames. Note that Enabled is a boolean field + and a value of false sets the component(s) to disabled. + operationId: doCompBulkEnabledPatch + parameters: + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/ComponentArray_PatchArray.Enabled' + responses: + "204": + description: Success. + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /State/Components/{xname}/Enabled: + patch: + tags: + - Component + summary: >- + Update component Enabled value at {xname} + description: >- + Update the component's Enabled field only. The State and other fields + are not modified. Note that this is a boolean field, a value of false sets + the component to disabled. + operationId: doCompEnabledPatch + parameters: + - name: xname + in: path + type: string + description: >- + Locational xname of component to set Enabled to true or false. + required: true + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/Component.1.0.0_Patch.Enabled' + responses: + "204": + description: Success. + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /State/Components/BulkSoftwareStatus: + patch: + tags: + - Component + summary: >- + Update multiple components' SoftwareStatus values via a list of xnames + description: >- + Update the SoftwareStatus field for a list of xnames. Specify + a single new value of SoftwareStatus like admindown and the list of xnames. + operationId: doCompBulkSwStatusPatch + parameters: + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/ComponentArray_PatchArray.SoftwareStatus' + responses: + "204": + description: Success. 
+ "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /State/Components/{xname}/SoftwareStatus: + patch: + tags: + - Component + summary: >- + Update component SoftwareStatus value at {xname} + description: >- + Update the component's SoftwareStatus field only. The State and + other fields are not modified. + operationId: doCompSwStatusPatch + parameters: + - name: xname + in: path + type: string + description: >- + Locational xname of component to set new SoftwareStatus value. + required: true + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/Component.1.0.0_Patch.SoftwareStatus' + responses: + "204": + description: Success. + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /State/Components/BulkRole: + patch: + tags: + - Component + summary: >- + Update multiple components' Role values via a list of xnames + description: >- + Update the Role and SubRole field for a list of xnames. Specify the Role and Subrole values and the list of + xnames. The list of IDs and the new Role are required. + operationId: doCompBulkRolePatch + parameters: + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/ComponentArray_PatchArray.Role' + responses: + "204": + description: Success. + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /State/Components/{xname}/Role: + patch: + tags: + - Component + summary: >- + Update component Role and SubRole values at {xname} + description: >- + Update the component's Role and SubRole fields only. Valid only for nodes. + The State and other fields are not modified. + operationId: doCompRolePatch + parameters: + - name: xname + in: path + type: string + description: Locational xname of component to modify Role on. + required: true + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/Component.1.0.0_Patch.Role' + responses: + "200": + description: Success. + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /State/Components/BulkNID: + patch: + tags: + - Component + summary: >- + Update multiple components' NIDs via ComponentArray + description: >- + Modify the submitted ComponentArray and update the + corresponding NID value for each entry. Other fields are ignored and not changed. + ID field is required for all entries. + operationId: doCompArrayNIDPatch + parameters: + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/ComponentArray_PatchArray.NID' + responses: + "204": + description: Success. 
+ "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /State/Components/{xname}/NID: + patch: + tags: + - Component + summary: >- + Update component NID value at {xname} + description: >- + Update the component's NID field only. Valid only for nodes. + State and other fields are not modified. + operationId: doCompNIDPatch + parameters: + - name: xname + in: path + type: string + description: Locational xname of component to modify NID on. + required: true + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/Component.1.0.0_Patch.NID' + responses: + "200": + description: Success. + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /State/Components/Query: + post: + tags: + - Component + summary: Create component query (by xname list), returning ComponentArray + description: >- + Retrieve the targeted entries in the form of a ComponentArray by providing a payload + of component IDs. + operationId: doComponentsQueryPost + parameters: + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/ComponentArray_PostQuery' + responses: + "200": + description: >- + ComponentArray representing results of query. + schema: + $ref: '#/definitions/ComponentArray_ComponentArray' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /State/Components/ByNID/Query: + post: + tags: + - Component + summary: Create component query (by NID ranges), returning ComponentArray + description: >- + Retrieve the targeted entries in the form of a ComponentArray by providing a payload + of NID ranges. + operationId: doComponentByNIDQueryPost + parameters: + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/ComponentArray_PostByNIDQuery' + responses: + "200": + description: >- + ComponentArray representing results of query. + schema: + $ref: '#/definitions/ComponentArray_ComponentArray' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /State/Components/Query/{xname}: + get: + tags: + - Component + summary: >- + Retrieve component query for {xname}, returning ComponentArray + description: >- + Retrieve component entries in the form of a ComponentArray by providing xname and + modifiers in the query string. + operationId: doComponentQueryGet + parameters: + - name: xname + in: path + type: string + description: Locational xname of component to query. + required: true + - name: type + in: query + type: string + description: >- + Retrieve xname's children of type={type} instead of {xname} for example NodeBMC, + NodeEnclosure etc. 
+ enum: + - CDU + - CabinetCDU + - CabinetPDU + - CabinetPDUOutlet + - CabinetPDUPowerConnector + - CabinetPDUController + - Cabinet + - Chassis + - ChassisBMC + - CMMRectifier + - CMMFpga + - CEC + - ComputeModule + - RouterModule + - NodeBMC + - NodeEnclosure + - NodeEnclosurePowerSupply + - HSNBoard + - MgmtSwitch + - MgmtHLSwitch + - CDUMgmtSwitch + - Node + - Processor + - Drive + - StorageGroup + - NodeNIC + - Memory + - NodeAccel + - NodeAccelRiser + - NodeFpga + - HSNAsic + - RouterFpga + - RouterBMC + - HSNLink + - HSNConnector + - INVALID + - $ref: '#/parameters/compStateParam' + - $ref: '#/parameters/compFlagParam' + - $ref: '#/parameters/compRoleParam' + - $ref: '#/parameters/compSubroleParam' + - $ref: '#/parameters/compEnabledParam' + - $ref: '#/parameters/compSoftwareStatusParam' + - $ref: '#/parameters/compSubtypeParam' + - $ref: '#/parameters/compArchParam' + - $ref: '#/parameters/compClassParam' + - $ref: '#/parameters/compNIDParam' + - $ref: '#/parameters/compNIDStartParam' + - $ref: '#/parameters/compNIDEndParam' + - $ref: '#/parameters/compPartitionParam' + - $ref: '#/parameters/compGroupParam' + - name: stateonly + in: query + type: boolean + description: >- + Return only component state and flag fields (plus xname/ID and + type). Results can be modified and used for bulk state/flag-only + patch operations. + - name: flagonly + in: query + type: boolean + description: >- + Return only component flag field (plus xname/ID and type). + Results can be modified and used for bulk flag-only patch + operations. + - name: roleonly + in: query + type: boolean + description: >- + Return only component role and subrole fields (plus xname/ID and type). + Results can be modified and used for bulk role-only patches. + - name: nidonly + in: query + type: boolean + description: >- + Return only component NID field (plus xname/ID and type). + Results can be modified and used for bulk NID-only patches. + responses: + "200": + description: >- + ComponentArray representing results of query. + schema: + $ref: '#/definitions/ComponentArray_ComponentArray' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + ######################################################################## + # + # Locking v2 API Calls + # + ######################################################################## + '/locks/reservations/remove': + post: + summary: Forcibly deletes existing reservations. + x-private: true + description: >- + Given a list of components, forcibly deletes any existing reservation. Does not change lock state; + does not disable the reservation ability of the component. An empty set of xnames will delete + reservations on all xnames. This functionality should be used sparingly; the normal flow should be + to release reservations rather than remove them. + parameters: + - name: payload + in: body + description: >- + List of xnames to remove reservations. A `rigid` processing model will result in the entire set + of xnames not having their reservation removed if an xname doesn't exist, or isn't reserved. A + `flexible` processing model will perform all actions possible. + required: true + schema: + $ref: '#/definitions/AdminReservationRemove.1.0.0' + + responses: + '202': + description: Accepted. Returns a count + list of xnames that succeeded or failed the operation.
+ schema: + $ref: '#/definitions/XnameResponse_1.0.0' + '400': + description: >- + Bad request; something is wrong with the structure received. Will not be used to represent + failure to accomplish the operation, that will be returned in the standard payload. + schema: + $ref: '#/definitions/Problem7807' + '500': + description: Server error, could not delete reservations + schema: + $ref: '#/definitions/Problem7807' + tags: + - Locking + - admin-reservations + - cli_ignore + '/locks/reservations/release': + post: + summary: Releases existing reservations. + x-private: true + description: Given a list of {xname & reservation key}, releases the associated reservations. + parameters: + - name: payload + in: body + required: true + description: >- + List of {xname and reservation key} to release reservations. A `rigid` processing model will + result in the entire set of xnames not having their reservation released if an xname doesn't exist, + or isn't reserved. A `flexible` processing model will perform all actions possible. + schema: + $ref: '#/definitions/ReservedKeys.1.0.0' + + responses: + '202': + description: Accepted. Returns a count + list of xnames that succeeded or failed the operation. + schema: + $ref: '#/definitions/XnameResponse_1.0.0' + '400': + description: >- + Bad request; something is wrong with the structure received. Will not be used to represent + failure to accomplish the operation, that will be returned in the standard payload. + schema: + $ref: '#/definitions/Problem7807' + '500': + description: Server error, could not delete reservations + schema: + $ref: '#/definitions/Problem7807' + tags: + - Locking + - admin-reservations + - cli_ignore + '/locks/reservations': + post: + summary: Create reservations + x-private: true + description: >- + Creates reservations on a set of xnames of infinite duration. Component must be locked to create a + reservation. + parameters: + - name: payload + in: body + description: >- + List of components to create reservations. A `rigid` processing model will result in the entire + set of xnames not having reservations created if an xname doesn't exist, or isn't locked, or if + already reserved. A `flexible` processing model will perform all actions possible. + required: true + schema: + $ref: '#/definitions/AdminReservationCreate.1.0.0' + responses: + '202': + description: Accepted request. See response for details. + schema: + $ref: '#/definitions/AdminReservationCreate_Response.1.0.0' + '400': + description: >- + Bad request; something is wrong with the structure received. Will not be used to represent + failure to accomplish the operation, that will be returned in the standard payload. + schema: + $ref: '#/definitions/Problem7807' + '500': + description: Server error, could not accept reservations + schema: + $ref: '#/definitions/Problem7807' + tags: + - Locking + - admin-reservations + - cli_ignore + '/locks/service/reservations/release': + post: + summary: Releases existing reservations. + x-private: true + description: Given a list of {xname & reservation key}, releases the associated reservations. + parameters: + - name: payload + in: body + description: >- + List of {xname and reservation key} to release reservations. A `rigid` processing model will + result in the entire set of xnames not having their reservation released if an xname doesn't exist, + or isn't reserved. A `flexible` processing model will perform all actions possible. 
+ required: true + schema: + $ref: '#/definitions/ReservedKeys.1.0.0' + + responses: + '202': + description: Accepted. Returns a count + list of xnames that succeeded or failed the operation. + schema: + $ref: '#/definitions/XnameResponse_1.0.0' + '400': + description: >- + Bad request; something is wrong with the structure received. Will not be used to represent + failure to accomplish the operation, that will be returned in the standard payload. + schema: + $ref: '#/definitions/Problem7807' + '500': + description: Server error, could not delete reservations + schema: + $ref: '#/definitions/Problem7807' + tags: + - Locking + - service-reservations + - cli_ignore + '/locks/service/reservations': + post: + summary: Create reservations + x-private: true + description: >- + Creates reservations on a set of xnames of finite duration. Component must be unlocked to create a + reservation. + parameters: + - name: payload + in: body + description: >- + List of components to create reservations. A `rigid` processing model will result in the + entire set of xnames not having reservations created if an xname doesn't exist, or isn't locked, + or if already reserved. A `flexible` processing model will perform all actions possible. + required: true + schema: + $ref: '#/definitions/ServiceReservationCreate.1.0.0' + responses: + '202': + description: Accepted request. See response for details. + schema: + $ref: '#/definitions/ServiceReservationCreate_Response.1.0.0' + '400': + description: >- + Bad request; something is wrong with the structure received. Will not be used to represent + failure to accomplish the operation, that will be returned in the standard payload. + schema: + $ref: '#/definitions/Problem7807' + '500': + description: Server error, could not accept reservations + schema: + $ref: '#/definitions/Problem7807' + tags: + - Locking + - service-reservations + - cli_ignore + '/locks/service/reservations/renew': + post: + summary: Renew existing reservations. + x-private: true + description: Given a list of {xname & reservation key}, renews the associated reservations. + parameters: + - name: payload + in: body + description: >- + List of {xname and reservation key} to renew reservations. A `rigid` processing model will + result in the entire set of xnames not having their reservation renewed if an xname doesn't exist, + or isn't reserved. A `flexible` processing model will perform all actions possible. + required: true + schema: + $ref: '#/definitions/ReservedKeysWithRenewal.1.0.0' + + responses: + '202': + description: Accepted. Returns a count + list of xnames that succeeded or failed the operation. + schema: + $ref: '#/definitions/XnameResponse_1.0.0' + '400': + description: >- + Bad request; something is wrong with the structure received. Will not be used to represent + failure to accomplish the operation, that will be returned in the standard payload. + schema: + $ref: '#/definitions/Problem7807' + '500': + description: Server error, could not delete reservations + schema: + $ref: '#/definitions/Problem7807' + tags: + - Locking + - service-reservations + - cli_ignore + '/locks/service/reservations/check': + post: + summary: Check the validity of reservations. + x-private: true + description: Using xname + reservation key check on the validity of reservations. + parameters: + - name: payload + in: body + description: List of components & deputy keys to check on validity of reservations. 
+ required: true + schema: + $ref: '#/definitions/DeputyKeys.1.0.0' + + responses: + '202': + description: Accepted. Returns the results of the reservation validity check. + schema: + $ref: '#/definitions/ServiceReservationCheck_Response.1.0.0' + '400': + description: Bad request. + schema: + $ref: '#/definitions/Problem7807' + '500': + description: Server error, could not check reservations. + schema: + $ref: '#/definitions/Problem7807' + tags: + - Locking + - service-reservations + - cli_ignore + '/locks/status': + post: + summary: Retrieve lock status for component IDs. + description: Using component IDs, retrieve the status of any locks and/or reservations. + parameters: + - name: payload + in: body + description: List of components to retrieve status. + required: true + schema: + $ref: '#/definitions/Xnames' + + responses: + '200': + description: Got lock(s) status. + schema: + $ref: '#/definitions/AdminStatusCheck_Response.1.0.0' + '400': + description: Bad request. + schema: + $ref: '#/definitions/Problem7807' + '500': + description: Server error, could not get lock status. + schema: + $ref: '#/definitions/Problem7807' + tags: + - Locking + - admin-locks + + get: + summary: Retrieve lock status for all components or a filtered subset of components. + description: >- + Retrieve the status of all component locks and/or reservations. Results can be + filtered by query parameters. + parameters: + - $ref: '#/parameters/compTypeParam' + - $ref: '#/parameters/compStateParam' + - $ref: '#/parameters/compRoleParam' + - $ref: '#/parameters/compSubroleParam' + - name: locked + in: query + type: boolean + description: Return components based on the 'Locked' field of their lock status. + required: false + - name: reserved + in: query + type: boolean + description: Return components based on the 'Reserved' field of their lock status. + required: false + - name: reservationDisabled + in: query + type: boolean + description: Return components based on the 'ReservationDisabled' field of their lock status. + required: false + + responses: + '200': + description: Got lock(s) status. + schema: + $ref: '#/definitions/AdminStatusCheck_Response.1.0.0' + '400': + description: Bad request. + schema: + $ref: '#/definitions/Problem7807' + '500': + description: Server error, could not get lock status. + schema: + $ref: '#/definitions/Problem7807' + tags: + - Locking + - admin-locks + + '/locks/lock': + post: + summary: Locks components. + description: >- + Using a list of components, create locks. A component cannot be locked if it is already locked, or if there is a current + reservation. + parameters: + - name: payload + in: body + description: List of xnames to lock. + required: true + schema: + $ref: '#/definitions/AdminLock.1.0.0' + + responses: + '200': + description: >- + Zero (success) error code - one or more entries locked. + Message contains count of locked items. + schema: + $ref: '#/definitions/XnameResponse_1.0.0' + '400': + description: Bad request. + schema: + $ref: '#/definitions/Problem7807' + '500': + description: Server error, could not lock components. + schema: + $ref: '#/definitions/Problem7807' + tags: + - Locking + - admin-locks + '/locks/unlock': + post: + summary: Unlocks components. + description: Using a list of components, unlock locks. A component cannot be unlocked if it is already unlocked. + parameters: + - name: payload + in: body + description: List of xnames to unlock. + required: true + schema: + $ref: '#/definitions/AdminLock.1.0.0' + + responses: + '200': + description: >- + Zero (success) error code - one or more entries unlocked. + Message contains count of unlocked locks.
+ schema: + $ref: '#/definitions/XnameResponse_1.0.0' + '400': + description: Bad request. + schema: + $ref: '#/definitions/Problem7807' + '500': + description: Server error, could not unlock components. + schema: + $ref: '#/definitions/Problem7807' + tags: + - Locking + - admin-locks + '/locks/repair': + post: + summary: Repair components' lock and reservation ability. + description: Repairs the disabled status of an xname, allowing new reservations to be created. + parameters: + - name: payload + in: body + description: List of xnames to repair. + required: true + schema: + $ref: '#/definitions/AdminLock.1.0.0' + + responses: + '200': + description: >- + Zero (success) error code - one or more locks repaired. + Message contains count of repaired locks. + schema: + $ref: '#/definitions/XnameResponse_1.0.0' + '400': + description: Bad request. + schema: + $ref: '#/definitions/Problem7807' + '500': + description: Server error, could not repair lock. + schema: + $ref: '#/definitions/Problem7807' + tags: + - Locking + - admin-locks + + '/locks/disable': + post: + summary: Disables the ability to create a reservation on components. + description: >- + Disables the ability to create a reservation on components and deletes any existing reservations. + Does not change lock state. Attempting to disable an already-disabled component will not result + in an error. + parameters: + - name: payload + in: body + description: List of xnames to disable. + required: true + schema: + $ref: '#/definitions/AdminLock.1.0.0' + + responses: + '200': + description: >- + Zero (success) error code - one or more locks disabled. + Message contains count of disabled locks. + schema: + $ref: '#/definitions/XnameResponse_1.0.0' + '400': + description: Bad request. + schema: + $ref: '#/definitions/Problem7807' + '500': + description: Server error, could not disable lock. + schema: + $ref: '#/definitions/Problem7807' + tags: + - Locking + - admin-locks + ######################################################################## + # + # Node Maps - Default NIDs/Roles/etc. to use on first time discovery + # + ######################################################################## + /Defaults/NodeMaps: + get: + tags: + - NodeMap + summary: >- + Retrieve all NodeMaps, returning NodeMapArray + description: >- + Retrieve all NodeMap entries as a named array, or an empty array if the + collection is empty. + operationId: doNodeMapsGet + responses: + "200": + description: >- + Named NodeMaps array representing all xname locations that + have defaults registered. + schema: + $ref: '#/definitions/NodeMapArray_NodeMapArray' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + post: + tags: + - NodeMap + summary: Create or Modify NodeMaps + description: >- + Create or update the given set of NodeMaps whose ID fields are each a + valid xname. The NID field is required and serves as the NID that + will be used when a component with the same xname ID is created for + the first time by discovery. + + Role is an optional field. A node is assigned the + default (e.g. Compute) role when it is first created during discovery. + The NID must be unique across all entries. + + SubRole is an optional field. A node is assigned no subrole by default + when it is first created during discovery. + + + The NodeMaps collection should be uploaded at install time by specifying + it as a JSON file.
As a result, when the endpoints are automatically discovered + by REDS, and inventory discovery is performed by HSM, the desired NID numbers will + be set as soon as the nodes are created using the NodeMaps collection. All node xnames + that are expected to be used in the system should be included in the mapping, even + if not currently populated. + + + It is recommended that NodeMaps are uploaded at install time before discovery happens. + If they are uploaded after discovery, then the node xnames need + to be manually updated with the correct NIDs. You can update NIDs for individual + components by using PATCH /State/Components/{xname}/NID. + + + Note the following points: + + * If the POST operation contains an xname that already exists, the entry will be overwritten + with the new entry (i.e. new NID, Role (if given), etc.). + + * The same NID cannot be used for more than one xname. If such a duplicate would be + created, the operation will fail. + + * If the node has already been discovered for the first time (that is, it exists in + /hsm/v2/State/Components and already has a previous/default NID), modifying the + NodeMap entry will not automatically reassign the current NID. + + * If you wish to use POST to completely replace the current NodeMaps collection + (rather than modifying it), first delete it using the DELETE method on the collection. + Otherwise the current entries and the new ones will be merged if they are disjoint + sets of nodes. + operationId: doNodeMapPost + parameters: + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/NodeMapArray_NodeMapArray' + responses: + "200": + description: >- + Zero (success) error code - one or more entries created or + updated. Message contains count of new/modified items. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "409": + description: Conflict. Duplicate resource (NID) would be created. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - NodeMap + - cli_danger$This will delete all node maps, continue? + summary: >- + Delete all NodeMap entities + description: >- + Delete all entries in the NodeMaps collection. + operationId: doNodeMapsDeleteAll + responses: + "200": + description: >- + Zero (success) error code - one or more entries deleted. + Message contains count of deleted items. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist - Collection is empty + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /Defaults/NodeMaps/{xname}: + get: + tags: + - NodeMap + summary: Retrieve NodeMap at {xname} + description: >- + Retrieve NodeMap, i.e. defaults NID/Role/etc. for node located at + physical location {xname}. + operationId: doNodeMapGet + parameters: + - name: xname + in: path + type: string + description: Locational xname of NodeMap record to return. 
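+ For example, `x0c0s0b0n0`.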
+ required: true + responses: + "200": + description: NodeMap entry matching xname/ID + schema: + $ref: '#/definitions/NodeMap.1.0.0_NodeMap' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - NodeMap + summary: Delete NodeMap with ID {xname} + description: >- + Delete NodeMap entry for a specific node {xname}. + operationId: doNodeMapDelete + parameters: + - name: xname + in: path + type: string + description: Locational xname of NodeMap record to delete. + required: true + responses: + "200": + description: Zero (success) error code - NodeMap is deleted. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: XName does Not Exist - no matching ID to delete + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + put: + tags: + - NodeMap + summary: Update definition for NodeMap ID {xname} + description: >- + Update or create an entry for an individual node xname using PUT. + Note the following points: + + * If the PUT operation contains an xname that already exists, the entry will be overwritten + with the new entry (i.e. new NID, Role (if given), etc.). + + * The same NID cannot be used for more than one xname. If such a duplicate would be + created, the operation will fail. + + * If the node has already been discovered for the first time (that is, it exists in + /hsm/v2/State/Components and already has a previous/default NID), modifying the + NodeMap entry will not automatically reassign the current NID. + operationId: doNodeMapPut + parameters: + - name: xname + in: path + type: string + description: >- + Locational xname of NodeMap record to create or update. + required: true + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/NodeMap.1.0.0_NodeMap' + responses: + "200": + description: NodeMap entry was successfully created/updated. + schema: + $ref: '#/definitions/NodeMap.1.0.0_PostNodeMap' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "409": + description: Conflict. Duplicate resource (NID) would be created. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + ######################################################################## + # + # Hardware Inventory API calls + # + ######################################################################## + /Inventory/Hardware: + get: + tags: + - HWInventoryByLocation + summary: >- + Retrieve all HWInventoryByLocation entries in array + description: >- + Retrieve all HWInventoryByLocation entries. Note that all entries are displayed as + a flat array. + For most purposes, you will want to use /Inventory/Hardware/Query. + operationId: doHWInvByLocationGetAll + parameters: + - $ref: '#/parameters/compIDParam' + - $ref: '#/parameters/compTypeParam' + - name: manufacturer + in: query + type: string + description: >- + Retrieve HWInventoryByLocation entries with the given Manufacturer. + - name: partnumber + in: query + type: string + description: >- + Retrieve HWInventoryByLocation entries with the given part number. 
+ - name: serialnumber + in: query + type: string + description: >- + Retrieve HWInventoryByLocation entries with the given serial number. + - name: fruid + in: query + type: string + description: >- + Retrieve HWInventoryByLocation entries with the given FRU ID. + responses: + "200": + description: >- + Flat, unsorted HWInventoryByLocation array. + schema: + type: array + items: + $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + post: + tags: + - HWInventoryByLocation + - cli_ignore + summary: Create/Update hardware inventory entries + description: >- + Create/Update hardware inventory entries + x-private: true + operationId: doHWInvByLocationPost + parameters: + - name: payload + in: body + required: true + schema: + type: object + properties: + Hardware: + type: array + items: + $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + example: + Hardware: + - ID: x3000c0s23b4n4h0 + HWInventoryByLocationType: HWInvByLocHSNNIC + HSNNICLocationInfo: + Id: "HPCNet3" + Description: "Shasta Timms NMC REV04 (HSN)" + PopulatedFRU: + HWInventoryByFRUType: HWInvByFRUHSNNIC + HSNNICFRUInfo: + Model: "ConnectX-5 100Gb/s" + SerialNumber: "HG20190738" + PartNumber: "102005303" + Manufacturer: "Mellanox Technologies, Ltd." + responses: + "200": + description: >- + Zero (success) error code - one or more entries created or + updated. Message contains count of new/modified items. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - HWInventoryByLocation + - cli_danger$This will delete all hardware inventory, continue? + summary: >- + Delete all HWInventoryByLocation entries + description: >- + Delete all entries in the HWInventoryByLocation collection. + Note that this does not delete any associated HWInventoryByFRU entries. + operationId: doHWInvByLocationDeleteAll + responses: + "200": + description: >- + Zero (success) response code - one or more entries deleted. + Message contains count of deleted items. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist - Collection is empty + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /Inventory/HardwareByFRU: + get: + tags: + - HWInventoryByFRU + summary: >- + Retrieve all HWInventoryByFRU entries in a flat array + description: >- + Retrieve all HWInventoryByFRU entries. Note that there is no organization + of the data, the entries are presented as a flat array. + For most purposes, you will want to use /Inventory/Hardware/Query + unless you are interested in components that are not currently + installed anywhere. + operationId: doHWInvByFRUGetAll + parameters: + - name: fruid + in: query + type: string + description: >- + Retrieve HWInventoryByFRU entries with the given FRU ID. + - $ref: '#/parameters/compTypeParam' + - name: manufacturer + in: query + type: string + description: >- + Retrieve HWInventoryByFRU entries with the given Manufacturer. 
+ - name: partnumber + in: query + type: string + description: >- + Retrieve HWInventoryByFRU entries with the given part number. + - name: serialnumber + in: query + type: string + description: >- + Retrieve HWInventoryByFRU entries with the given serial number. + responses: + "200": + description: >- + Flat, unsorted HWInventoryByFRU array. + schema: + type: array + items: + $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - HWInventoryByFRU + - cli_danger$This will delete all FRUs for HSM, continue? + summary: >- + Delete all HWInventoryByFRU entries + description: >- + Delete all entries in the HWInventoryByFRU collection. + Note that this does not delete any associated HWInventoryByLocation entries. + Also, if any items are associated with a HWInventoryByLocation, the deletion will fail. + operationId: doHWInvByFRUDeleteAll + responses: + "200": + description: >- + Zero (success) response code - one or more entries deleted. + Message contains count of deleted items. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist - Collection is empty + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /Inventory/Hardware/{xname}: + get: + tags: + - HWInventoryByLocation + summary: >- + Retrieve HWInventoryByLocation entry at {xname} + description: >- + Retrieve HWInventoryByLocation entries for a specific xname. + operationId: doHWInvByLocationGet + parameters: + - name: xname + in: path + type: string + description: Locational xname of hardware inventory record to return. + required: true + responses: + "200": + description: HWInventoryByLocation entry matching xname/ID + schema: + $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + examples: + application/json: + # TODO: this is the exact same example as the HWInvByLocNode + # subclass. This is the most instructive example, but we + # should probably provide both a River and Mountain example + # instead of the same one. 
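+ # The example below is roughly what
+ # GET /hsm/v2/Inventory/Hardware/x0c0s0b0n0
+ # would return for a populated River node (all values illustrative).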
+ ID: x0c0s0b0n0 + Type: Node + Ordinal: 0 + Status: Populated + HWInventoryByLocationType: HWInvByLocNode + NodeLocationInfo: + Id: System.Embedded.1 + Name: Name describing system or where it is located, per manufacturing + Description: Description of system/node type, per manufacturing + Hostname: if_defined_in_Redfish + ProcessorSummary: + Count: 2 + Model: Multi-Core Intel(R) Xeon(R) processor E5-16xx Series + MemorySummary: + TotalSystemMemoryGiB: 64 + PopulatedFRU: + FRUID: Dell-99999-1234.1234.2345 + Type: Node + Subtype: River + HWInventoryByFRUType: HWInvByFRUNode + NodeFRUInfo: + AssetTag: AdminAssignedAssetTag + BiosVersion: v1.0.2.9999 + Model: OKS0P2354 + Manufacturer: Dell + PartNumber: p99999 + SerialNumber: 1234.1234.2345 + SKU: as213234 + SystemType: Physical + UUID: 26276e2a-29dd-43eb-8ca6-8186bbc3d971 + Processors: + - ID: x0c0s0b0n0p0 + Type: Processor + Ordinal: 0 + Status: Populated + HWInventoryByLocationType: HWInvByLocProcessor + ProcessorLocationInfo: + Id: CPU1 + Name: Processor + Description: Socket 1 Processor + Socket: CPU 1 + PopulatedFRU: + FRUID: HOW-TO-ID-CPUS-FROM-REDFISH-IF-AT-ALL + Type: Processor + Subtype: SKL24 + HWInventoryByFRUType: HWInvByFRUProcessor + ProcessorFRUInfo: + InstructionSet: x86-64 + Manufacturer: Intel + MaxSpeedMHz: 2600 + Model: Intel(R) Xeon(R) CPU E5-2623 v4 @ 2.60GHz + ProcessorArchitecture: x86 + ProcessorId: + EffectiveFamily: 6 + EffectiveModel: 79 + IdentificationRegisters: 0x000406F1 + MicrocodeInfo: 0xB000017 + Step: 1 + VendorID: GenuineIntel + ProcessorType: CPU + TotalCores: 24 + TotalThreads: 48 + - ID: x0c0s0b0n0p1 + Type: Processor + Ordinal: 1 + Status: Populated + HWInventoryByLocationType: HWInvByLocProcessor + ProcessorLocationInfo: + Id: CPU2 + Name: Processor + Description: Socket 2 Processor + Socket: CPU 2 + PopulatedFRU: + FRUID: HOW-TO-ID-CPUS-FROM-REDFISH-IF-AT-ALL + Type: Processor + Subtype: SKL24 + HWInventoryByFRUType: HWInvByFRUProcessor + ProcessorFRUInfo: + InstructionSet: x86-64 + Manufacturer: Intel + MaxSpeedMHz: 2600 + Model: Intel(R) Xeon(R) CPU E5-2623 v4 @ 2.60GHz + ProcessorArchitecture: x86 + ProcessorId: + EffectiveFamily: 6 + EffectiveModel: 79 + IdentificationRegisters: 0x000406F1 + MicrocodeInfo: 0xB000017 + Step: 1 + VendorID: GenuineIntel + ProcessorType: CPU + TotalCores: 24 + TotalThreads: 48 + Memory: + - ID: x0c0s0b0n0d0 + Type: Memory + Ordinal: 0 + Status: Populated + HWInventoryByLocationType: HWInvByLocMemory + MemoryLocationInfo: + Id: DIMM1 + Name: DIMM Slot 1 + MemoryLocation: + Socket: 1 + MemoryController: 1 + Channel: 1 + Slot: 1 + PopulatedFRU: + FRUID: MFR-PARTNUMBER-SERIALNUMBER + Type: Memory + Subtype: DIMM2400G32 + HWInventoryByFRUType: HWInvByFRUMemory + MemoryFRUInfo: + BaseModuleType: RDIMM + BusWidthBits: 72 + CapacityMiB: 32768 + DataWidthBits: 64 + ErrorCorrection: MultiBitECC + Manufacturer: Micron + MemoryType: DRAM + MemoryDeviceType: DDR4 + OperatingSpeedMhz: 2400 + PartNumber: XYZ-123-1232 + RankCount: 2 + SerialNumber: 12344567689j + - ID: x0c0s0b0n0d1 + Type: Memory + Ordinal: 1 + Status: Empty + HWInventoryByLocationType: HWInvByLocMemory + MemoryLocationInfo: + Id: DIMM2 + Name: Socket 1 DIMM Slot 2 + MemoryLocation: + Socket: 1 + MemoryController: 1 + Channel: 1 + Slot: 2 + PopulatedFRU: + - ID: x0c0s0b0n0d2 + Type: Memory + Ordinal: 2 + Status: Populated + HWInventoryByLocationType: HWInvByLocMemory + MemoryLocationInfo: + Id: DIMM3 + Name: Socket 2 DIMM Slot 1 + MemoryLocation: + Socket: 2 + MemoryController: 2 + Channel: 1 + Slot: 1 + 
PopulatedFRU: + FRUID: MFR-PARTNUMBER-SERIALNUMBER_2 + Type: Memory + Subtype: DIMM2400G32 + HWInventoryByFRUType: HWInvByFRUMemory + MemoryFRUInfo: + BaseModuleType: RDIMM + BusWidthBits: 72 + CapacityMiB: 32768 + DataWidthBits: 64 + ErrorCorrection: MultiBitECC + Manufacturer: Micron + MemoryType: DRAM + MemoryDeviceType: DDR4 + OperatingSpeedMhz: 2400 + PartNumber: XYZ-123-1232 + RankCount: 2 + SerialNumber: 346456346346j + - ID: x0c0s0b0n0d3 + Type: Memory + Ordinal: 3 + Status: Empty + HWInventoryByLocationType: HWInvByLocMemory + MemoryLocationInfo: + Id: DIMM3 + Name: Socket 2 DIMM Slot 2 + MemoryLocation: + Socket: 2 + MemoryController: 2 + Channel: 1 + Slot: 2 + PopulatedFRU: + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - HWInventoryByLocation + summary: DELETE HWInventoryByLocation entry with ID (location) {xname} + description: >- + Delete HWInventoryByLocation entry for a specific xname. + operationId: doHWInvByLocationDelete + parameters: + - name: xname + in: path + type: string + description: + Locational xname of HWInventoryByLocation record to delete. + required: true + responses: + "200": + description: Zero (success) code - entry is deleted. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: XName does Not Exist - no matching ID to delete + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /Inventory/HardwareByFRU/{fruid}: + get: + tags: + - HWInventoryByFRU + summary: Retrieve HWInventoryByFRU for {fruid} + description: >- + Retrieve HWInventoryByFRU for a specific fruID. + operationId: doHWInvByFRUGet + parameters: + - name: fruid + in: path + type: string + description: >- + Global HMS field-replaceable (FRU) identifier (serial number, etc.) + of the hardware component to select. + required: true + responses: + "200": + description: HWInventoryByFRU entry matching fruid + schema: + $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - HWInventoryByFRU + summary: Delete HWInventoryByFRU entry with FRU identifier {fruid} + description: >- + Delete an entry in the HWInventoryByFRU collection. + Note that this does not delete the associated HWInventoryByLocation entry + if the FRU is currently residing in the system. In fact, if + the FRU ID is associated with a HWInventoryByLocation currently, + the deletion will fail. + operationId: doHWInvByFRUDelete + parameters: + - name: fruid + in: path + type: string + description: Locational xname of HWInventoryByFRU record to delete. + required: true + responses: + "200": + description: Zero (success) code - entry is deleted. 
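+ # Usage sketch (FRU ID is illustrative; per the description above,
+ # deletion fails while the FRU is still tied to a HWInventoryByLocation
+ # entry):
+ # DELETE /hsm/v2/Inventory/HardwareByFRU/Dell-99999-1234.1234.2345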
+ schema:
+ $ref: '#/definitions/Response_1.0.0'
+ "400":
+ description: Bad Request
+ schema:
+ $ref: '#/definitions/Problem7807'
+ "404":
+ description: FRU ID does Not Exist - no matching entry to delete
+ schema:
+ $ref: '#/definitions/Problem7807'
+ default:
+ description: Unexpected error
+ schema:
+ $ref: '#/definitions/Problem7807'
+ /Inventory/Hardware/Query/{xname}:
+ get:
+ tags:
+ - HWInventory
+ summary: Retrieve results of HWInventory query starting at {xname}
+ description: >-
+ Retrieve zero or more HWInventoryByLocation entries in the form of a HWInventory
+ by providing xname and modifiers in query string.
+ The FRU (field-replaceable unit) data will be included in each
+ HWInventoryByLocation entry if the location is populated.
+ operationId: doHWInvByLocationQueryGet
+ parameters:
+ - name: xname
+ in: path
+ type: string
+ description: >-
+ Locational xname of parent component, system (e.g. s0, all) or
+ partition (p#.#) to target for hardware inventory
+ required: true
+ - $ref: '#/parameters/compTypeParam'
+ - name: children
+ in: query
+ type: boolean
+ description: >-
+ Also return children of the selected components. Default is
+ true.
+ - name: parents
+ in: query
+ type: boolean
+ description: >-
+ Also return parents of the selected components.
+ - name: partition
+ in: query
+ type: string
+ description: >-
+ Restrict search to the given partition (p#.#). Child components are assumed to be
+ in the same partition as the parent component when performing this kind of query.
+ - name: format
+ in: query
+ type: string
+ description: >-
+ How to display results. 'FullyFlat': all component types are listed
+ in their own arrays only, with no nesting of any children.
+ 'NestNodesOnly': flat except that node subcomponents are nested
+ hierarchically. Default is NestNodesOnly.
+ # Hierarchical All subcomponents listed as children up to
+ # top level component (or set of cabinets)
+ responses:
+ "200":
+ description: >-
+ ComponentArray representing results of query.
+ schema:
+ $ref: '#/definitions/HWInventory.1.0.0_HWInventory'
+ "400":
+ description: Bad Request
+ schema:
+ $ref: '#/definitions/Problem7807'
+ "404":
+ description: Does Not Exist
+ schema:
+ $ref: '#/definitions/Problem7807'
+ default:
+ description: Unexpected error
+ schema:
+ $ref: '#/definitions/Problem7807'
+ /Inventory/Hardware/History:
+ get:
+ tags:
+ - HWInventoryHistory
+ summary: >-
+ Retrieve the history entries for all HWInventoryByLocation entries
+ description: >-
+ Retrieve the history entries for all HWInventoryByLocation entries.
+ operationId: doHWInvHistByLocationsGet
+ parameters:
+ - $ref: '#/parameters/compIDParam'
+ - name: eventtype
+ in: query
+ type: string
+ description: >-
+ Retrieve the history entries of a specific type (Added, Removed, etc)
+ for HWInventoryByLocation entries.
+ - name: starttime
+ in: query
+ type: string
+ description: >-
+ Retrieve the history entries from after the requested history window
+ start time for HWInventoryByLocation entries. This takes an RFC3339
+ formatted string (2006-01-02T15:04:05Z07:00).
+ - name: endtime
+ in: query
+ type: string
+ description: >-
+ Retrieve the history entries from before the requested history window
+ end time for HWInventoryByLocation entries. This takes an RFC3339
+ formatted string (2006-01-02T15:04:05Z07:00).
+ responses:
+ "200":
+ description: >-
+ An array of history entries sorted by xname.
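+ # Usage sketch using the RFC3339 format noted above (time value
+ # illustrative):
+ # GET /hsm/v2/Inventory/Hardware/History?eventtype=Added&starttime=2006-01-02T15:04:05Z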
+ schema: + $ref: '#/definitions/HWInventory.1.0.0_HWInventoryHistoryCollection' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - HWInventoryHistory + - cli_danger$This will delete all hardware history, continue? + summary: >- + Clear the HWInventory history. + description: >- + Delete all HWInventory history entries. + Note that this also deletes history for any associated HWInventoryByFRU entries. + operationId: doHWInvHistByLocationDeleteAll + responses: + "200": + description: >- + Zero (success) response code - one or more entries deleted. + Message contains count of deleted items. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist - Collection is empty + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /Inventory/Hardware/History/{xname}: + get: + tags: + - HWInventoryHistory + summary: >- + Retrieve the history entries for the HWInventoryByLocation entry at {xname} + description: >- + Retrieve the history entries for a HWInventoryByLocation entry with a specific xname. + operationId: doHWInvHistByLocationGet + parameters: + - name: xname + in: path + type: string + description: Locational xname of hardware inventory record to return history for. + required: true + - name: eventtype + in: query + type: string + description: >- + Retrieve the history entries of a specific type (Added, Removed, etc) + for a HWInventoryByLocation entry. + - name: starttime + in: query + type: string + description: >- + Retrieve the history entries from after the requested history window + start time for a HWInventoryByLocation entry. This takes an RFC3339 + formatted string (2006-01-02T15:04:05Z07:00). + - name: endtime + in: query + type: string + description: >- + Retrieve the history entries from before the requested history window + end time for a HWInventoryByLocation entry. This takes an RFC3339 + formatted string (2006-01-02T15:04:05Z07:00). + responses: + "200": + description: >- + History entries for the HWInventoryByLocation entry matching xname/ID + schema: + $ref: '#/definitions/HWInventory.1.0.0_HWInventoryHistoryArray' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - HWInventoryHistory + - cli_danger$This will delete all history for this xname, continue? + summary: DELETE history for the HWInventoryByLocation entry with ID (location) {xname} + description: >- + Delete history for the HWInventoryByLocation entry for a specific xname. + operationId: doHWInvHistByLocationDelete + parameters: + - name: xname + in: path + type: string + description: + Locational xname of HWInventoryByLocation record to delete history for. + required: true + responses: + "200": + description: Zero (success) code - entry is deleted. 
+ schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: XName does Not Exist - no matching ID to delete + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /Inventory/HardwareByFRU/History: + get: + tags: + - HWInventoryHistory + summary: >- + Retrieve the history entries for all HWInventoryByFRU entries. + description: >- + Retrieve the history entries for all HWInventoryByFRU entries. Sorted by FRU. + operationId: doHWInvHistByFRUsGet + parameters: + - name: fruid + in: query + type: string + description: >- + Retrieve the history entries for HWInventoryByFRU entries with the given FRU ID. + - name: eventtype + in: query + type: string + description: >- + Retrieve the history entries of a specific type (Added, Removed, etc) + for HWInventoryByFRU entries. + - name: starttime + in: query + type: string + description: >- + Retrieve the history entries from after the requested history window + start time for HWInventoryByFRU entries. This takes an RFC3339 + formatted string (2006-01-02T15:04:05Z07:00). + - name: endtime + in: query + type: string + description: >- + Retrieve the history entries from before the requested history window + end time for HWInventoryByFRU entries. This takes an RFC3339 + formatted string (2006-01-02T15:04:05Z07:00). + responses: + "200": + description: >- + An array of history entries sorted by FRU. + schema: + $ref: '#/definitions/HWInventory.1.0.0_HWInventoryHistoryCollection' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /Inventory/HardwareByFRU/History/{fruid}: + get: + tags: + - HWInventoryHistory + summary: Retrieve the history entries for the HWInventoryByFRU for {fruid} + description: >- + Retrieve the history entries for the HWInventoryByFRU for a specific fruID. + operationId: doHWInvHistByFRUGet + parameters: + - name: fruid + in: path + type: string + description: >- + Global HMS field-replaceable (FRU) identifier (serial number, etc.) + of the hardware component to select. + required: true + - name: eventtype + in: query + type: string + description: >- + Retrieve the history entries of a specific type (Added, Removed, etc) + for a HWInventoryByFRU entry. + - name: starttime + in: query + type: string + description: >- + Retrieve the history entries from after the requested history window + start time for a HWInventoryByFRU entry. This takes an RFC3339 + formatted string (2006-01-02T15:04:05Z07:00). + - name: endtime + in: query + type: string + description: >- + Retrieve the history entries from before the requested history window + end time for a HWInventoryByFRU entry. This takes an RFC3339 + formatted string (2006-01-02T15:04:05Z07:00). 
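+ # Usage sketch combining the time-window filters above (FRU ID and
+ # times are illustrative):
+ # GET /hsm/v2/Inventory/HardwareByFRU/History/MFR-PARTNUMBER-SERIALNUMBER?eventtype=Removed&endtime=2006-01-02T15:04:05Z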
+ responses: + "200": + description: >- + History entries for the HWInventoryByFRU entry matching fruid + schema: + $ref: '#/definitions/HWInventory.1.0.0_HWInventoryHistoryArray' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - HWInventoryHistory + - cli_danger$This will delete all history for this FRU, continue? + summary: Delete history for the HWInventoryByFRU entry with FRU identifier {fruid} + description: >- + Delete history for an entry in the HWInventoryByFRU collection. + operationId: doHWInvHistByFRUDelete + parameters: + - name: fruid + in: path + type: string + description: Locational xname of HWInventoryByFRU record to delete history for. + required: true + responses: + "200": + description: Zero (success) code - entry is deleted. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: FRU ID does Not Exist - no matching entry to delete + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + ######################################################################## + # + # RedfishEndpoint API Calls + # + ######################################################################## + /Inventory/RedfishEndpoints: + get: + tags: + - RedfishEndpoint + summary: >- + Retrieve all RedfishEndpoints, returning RedfishEndpointArray + description: >- + Retrieve all Redfish endpoint entries as a named array, optionally + filtering it. + operationId: doRedfishEndpointsGet + parameters: + - $ref: '#/parameters/compIDParam' + - name: fqdn + in: query + type: string + description: >- + Retrieve RedfishEndpoint with the given FQDN + - $ref: '#/parameters/compTypeParam' + - name: uuid + in: query + type: string + description: >- + Retrieve the RedfishEndpoint with the given UUID. + - name: macaddr + in: query + type: string + description: >- + Retrieve the RedfishEndpoint with the given MAC address. + - name: ipaddress + in: query + type: string + description: >- + Retrieve the RedfishEndpoint with the given IP address. A blank + string will get Redfish endpoints without IP addresses. + - name: lastdiscoverystatus + in: query + type: string + description: >- + Retrieve the RedfishEndpoints with the given discovery status. This can be negated (i.e. !DiscoverOK). + Valid values are: EndpointInvalid, EPResponseFailedDecode, HTTPsGetFailed, NotYetQueried, VerificationFailed, ChildVerificationFailed, DiscoverOK + responses: + "200": + description: >- + Named RedfishEndpoints array representing all current RF endpoints. + schema: + $ref: '#/definitions/RedfishEndpointArray_RedfishEndpointArray' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + post: + tags: + - RedfishEndpoint + summary: Create RedfishEndpoint(s) + description: >- + Create a new RedfishEndpoint whose ID field is a valid xname. + ID can be given explicitly, or if the Hostname or hostname portion + of the FQDN is given, and is a valid xname, this will be used for + the ID instead. 
The Hostname/Domain can be given as separate + fields and will be used to create a FQDN if one is not given. The + reverse is also true. If FQDN is an IP address it will be + treated as a hostname with a blank domain. The domain field is + used currently to assign the domain for discovered nodes + automatically. + + + If ID is given and is a valid XName, the hostname/domain/FQDN + does not need to have an XName as the hostname portion. It can + be any address. + + The ID and FQDN must be unique across all entries. + operationId: doRedfishEndpointsPost + parameters: + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/RedfishEndpoint.1.0.0_RedfishEndpoint' + responses: + "201": + description: Success, returns array of created resource URIs + schema: + type: array + items: + $ref: '#/definitions/ResourceURI.1.0.0' + examples: + application/json: + - URI: /hsm/v2/Inventory/RedfishEndpoints/x0c0s0b0 + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "409": + description: Conflict. Duplicate resource would be created. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - RedfishEndpoint + - cli_danger$This will delete all Redfish endpoints in HSM, continue? + summary: >- + Delete all RedfishEndpoints + description: >- + Delete all entries in the RedfishEndpoint collection. + operationId: doRedfishEndpointsDeleteAll + responses: + "200": + description: >- + Zero (success) error code - one or more entries deleted. + Message contains count of deleted items. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist - Collection is empty + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /Inventory/RedfishEndpoints/{xname}: + get: + tags: + - RedfishEndpoint + summary: Retrieve RedfishEndpoint at {xname} + description: >- + Retrieve RedfishEndpoint, located at physical location {xname}. + operationId: doRedfishEndpointGet + parameters: + - name: xname + in: path + type: string + description: Locational xname of RedfishEndpoint record to return. + required: true + responses: + "200": + description: RedfishEndpoint entry matching xname/ID + schema: + $ref: '#/definitions/RedfishEndpoint.1.0.0_RedfishEndpoint' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - RedfishEndpoint + summary: Delete RedfishEndpoint with ID {xname} + description: >- + Delete RedfishEndpoint record for a specific xname. + operationId: doRedfishEndpointDelete + parameters: + - name: xname + in: path + type: string + description: Locational xname of RedfishEndpoint record to delete. + required: true + responses: + "200": + description: Zero (success) error code - component is deleted. 
+ schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: XName does Not Exist - no matching ID to delete + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + put: + tags: + - RedfishEndpoint + - cli_ignore + summary: Update definition for RedfishEndpoint ID {xname} + description: >- + Create or update RedfishEndpoint record for a specific xname. + operationId: doRedfishEndpointPut + parameters: + - name: xname + in: path + type: string + description: >- + Locational xname of RedfishEndpoint record to create or update. + required: true + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/RedfishEndpoint.1.0.0_RedfishEndpoint' + responses: + "200": + description: Success, return updated RedfishEndpoint resource + schema: + $ref: '#/definitions/RedfishEndpoint.1.0.0_RedfishEndpoint' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: XName does Not Exist - no matching ID to update + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + patch: + tags: + - RedfishEndpoint + summary: Update (PATCH) definition for RedfishEndpoint ID {xname} + description: >- + Update (PATCH) RedfishEndpoint record for a specific xname. + operationId: doRedfishEndpointPatch + parameters: + - name: xname + in: path + type: string + description: >- + Locational xname of RedfishEndpoint record to create or update. + required: true + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/RedfishEndpoint.1.0.0_RedfishEndpoint' + responses: + "200": + description: Success, return updated RedfishEndpoint resource + schema: + $ref: '#/definitions/RedfishEndpoint.1.0.0_RedfishEndpoint' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: XName does Not Exist - no matching ID to update + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + # /Inventory/RedfishEndpoints/Query: + # post: + # tags: + # - RedfishEndpoint + # summary: >- + # POST RedfishEndpoint query, returning RedfishEndpointArray + # description: >- + # Given payload of locational xnames, retrieve the targeted entries + # in the form of a RedfishEndpointArray. + # operationId: doRedfishEndpointQueryPost + # parameters: + # - name: payload + # in: body + # required: true + # schema: + # $ref: '#/definitions/RedfishEndpointArray_PostQuery' + # responses: + # "200": + # description: >- + # ComponentArray representing results of query. + # schema: + # $ref: '#/definitions/RedfishEndpointArray_RedfishEndpointArray' + # "400": + # description: Bad Request + # schema: + # $ref: '#/definitions/Problem7807' + # "404": + # description: Does Not Exist + # schema: + # $ref: '#/definitions/Problem7807' + # default: + # description: Unexpected error + # schema: + # $ref: '#/definitions/Problem7807' + /Inventory/RedfishEndpoints/Query/{xname}: + get: + tags: + - RedfishEndpoint + summary: >- + Retrieve RedfishEndpoint query for {xname}, returning RedfishEndpointArray + description: >- + Given xname and modifiers in query string, retrieve zero or more + RedfishEndpoint entries in the form of a RedfishEndpointArray. 
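+ # Usage sketch (xname is illustrative; the query modifiers are
+ # currently commented out below, so a bare xname is the typical call):
+ # GET /hsm/v2/Inventory/RedfishEndpoints/Query/x0c0s0b0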
+ operationId: doRedfishEndpointQueryGet + parameters: + - name: xname + in: path + type: string + description: Locational xname of RedfishEndpoint to query. + required: true + # - name: type + # in: query + # type: string + # description: >- + # Retrieve xname's children of type={type} instead of {xname}. + # - name: children + # in: query + # type: boolean + # description: >- + # Also return children of the selected RedfishEndpoints. + # - name: parents + # in: query + # type: boolean + # description: >- + # Also return parents of the selected RedfishEndpoints. + # - name: partition + # in: query + # type: string + # description: >- + # Restrict search to the given hard:soft partition. + responses: + "200": + description: >- + RedfishEndpointArray representing results of query. + schema: + $ref: '#/definitions/RedfishEndpointArray_RedfishEndpointArray' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist - no matches + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + ######################################################################## + # + # ComponentEndpoint API - ComponentEndpoints discovered under Redfish EP + # + ######################################################################## + /Inventory/ComponentEndpoints: + get: + tags: + - ComponentEndpoint + summary: Retrieve ComponentEndpoints Collection + description: >- + Retrieve the full collection of ComponentEndpoints in the form of a + ComponentEndpointArray. Full results can also be filtered by query + parameters. Only the first filter parameter of each type is + used and the parameters are applied in an AND fashion. + If the collection is empty or the filters have no match, an + empty array is returned. + operationId: doComponentEndpointsGet + parameters: + - $ref: '#/parameters/compIDParam' + - name: redfish_ep + in: query + type: string + description: >- + Retrieve all ComponentEndpoints managed by the parent Redfish EP. + - $ref: '#/parameters/compTypeParam' + - name: redfish_type + in: query + type: string + description: >- + Retrieve all ComponentEndpoints with the given Redfish type. + # Not implemented. + # - name: partition + # in: query + # type: string + # description: >- + # Restrict search to the given hard.soft partition. + responses: + "200": + description: >- + ComponentEndpointArray representing the ComponentEndpoint + collection or a filtered subset thereof. 
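+ # Usage sketch of AND-combined filters (values match the example
+ # below):
+ # GET /hsm/v2/Inventory/ComponentEndpoints?redfish_ep=x0c0s0b0&redfish_type=ComputerSystem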
+ schema:
+ $ref: '#/definitions/ComponentEndpointArray_ComponentEndpointArray'
+ examples:
+ application/json:
+ ComponentEndpoints:
+ - ID: x0c0s0b0n0
+ Type: Node
+ Domain: mgmt.example.domain.com
+ FQDN: x0c0s0b0n0.mgmt.example.domain.com
+ RedfishType: ComputerSystem
+ RedfishSubtype: Physical
+ ComponentEndpointType: ComponentEndpointComputerSystem
+ MACAddr: d0:94:66:00:aa:37
+ UUID: bf9362ad-b29c-40ed-9881-18a5dba3a26b
+ OdataID: /redfish/v1/Systems/System.Embedded.1
+ RedfishEndpointID: x0c0s0b0
+ RedfishEndpointFQDN: x0c0s0b0.mgmt.example.domain.com
+ RedfishURL:
+ "x0c0s0b0.mgmt.example.domain.com/redfish/v1/Systems\
+ /System.Embedded.1"
+ RedfishSystemInfo:
+ Name: System Embedded 1
+ Actions:
+ '#ComputerSystem.Reset':
+ AllowableValues:
+ - "On"
+ - "ForceOff"
+ target:
+ "/redfish/v1/Systems/System.Embedded.1/Actions\
+ /ComputerSystem.Reset"
+ EthernetNICInfo:
+ - RedfishId: "1"
+ "@odata.id":
+ /redfish/v1/Systems/System.Embedded.1/EthernetInterfaces/1
+ Description: Management Network Interface
+ InterfaceEnabled: true
+ MACAddress: d0:94:66:00:aa:37
+ PermanentMACAddress: d0:94:66:00:aa:37
+ - RedfishId: "2"
+ "@odata.id":
+ /redfish/v1/Systems/System.Embedded.1/EthernetInterfaces/2
+ Description: Management Network Interface
+ InterfaceEnabled: true
+ MACAddress: d0:94:66:00:aa:38
+ PermanentMACAddress: d0:94:66:00:aa:38
+ "400":
+ description: Bad Request
+ schema:
+ $ref: '#/definitions/Problem7807'
+ default:
+ description: Unexpected error
+ schema:
+ $ref: '#/definitions/Problem7807'
+ delete:
+ tags:
+ - ComponentEndpoint
+ - cli_danger$This will delete all component endpoints, continue?
+ summary: >-
+ Delete all ComponentEndpoints
+ description: >-
+ Delete all entries in the ComponentEndpoint collection.
+ operationId: doComponentEndpointsDeleteAll
+ responses:
+ "200":
+ description: >-
+ Zero (success) error code - one or more entries deleted.
+ Message contains count of deleted items.
+ schema:
+ $ref: '#/definitions/Response_1.0.0'
+ "400":
+ description: Bad Request
+ schema:
+ $ref: '#/definitions/Problem7807'
+ "404":
+ description: Does Not Exist - Collection is empty
+ schema:
+ $ref: '#/definitions/Problem7807'
+ default:
+ description: Unexpected error
+ schema:
+ $ref: '#/definitions/Problem7807'
+ /Inventory/ComponentEndpoints/{xname}:
+ get:
+ tags:
+ - ComponentEndpoint
+ summary: Retrieve ComponentEndpoint at {xname}
+ description: Retrieve ComponentEndpoint record for a specific xname.
+ operationId: doComponentEndpointGet
+ parameters:
+ - name: xname
+ in: path
+ type: string
+ description: Locational xname of ComponentEndpoint record to return.
+ required: true
+ responses:
+ "200":
+ description: ComponentEndpoint entry matching xname/ID
+ schema:
+ $ref: '#/definitions/ComponentEndpoint.1.0.0_ComponentEndpoint'
+ examples:
+ application/json:
+ ID: x0c0s0b0n0
+ Type: Node
+ Domain: mgmt.example.domain.com
+ FQDN: x0c0s0b0n0.mgmt.example.domain.com
+ RedfishType: ComputerSystem
+ RedfishSubtype: Physical
+ ComponentEndpointType: ComponentEndpointComputerSystem
+ MACAddr: d0:94:66:00:aa:37
+ UUID: bf9362ad-b29c-40ed-9881-18a5dba3a26b
+ OdataID: /redfish/v1/Systems/System.Embedded.1
+ RedfishEndpointID: x0c0s0b0
+ RedfishEndpointFQDN: x0c0s0b0.mgmt.example.domain.com
+ RedfishURL:
+ "x0c0s0b0.mgmt.example.domain.com/redfish/v1/Systems\
+ /System.Embedded.1"
+ RedfishSystemInfo:
+ Name: System Embedded 1
+ Actions:
+ '#ComputerSystem.Reset':
+ AllowableValues:
+ - "On"
+ - "ForceOff"
+ target:
+ "/redfish/v1/Systems/System.Embedded.1/Actions\
+ /ComputerSystem.Reset"
+ EthernetNICInfo:
+ - RedfishId: "1"
+ "@odata.id":
+ "/redfish/v1/Systems/System.Embedded.1\
+ /EthernetInterfaces/1"
+ Description: Management Network Interface
+ InterfaceEnabled: true
+ MACAddress: d0:94:66:00:aa:37
+ PermanentMACAddress: d0:94:66:00:aa:37
+ - RedfishId: "2"
+ "@odata.id":
+ "/redfish/v1/Systems/System.Embedded.1\
+ /EthernetInterfaces/2"
+ Description: Management Network Interface
+ InterfaceEnabled: true
+ MACAddress: ae:12:ce:7a:aa:99
+ PermanentMACAddress: ae:12:ce:7a:aa:99
+ "400":
+ description: Bad Request
+ schema:
+ $ref: '#/definitions/Problem7807'
+ "404":
+ description: Does Not Exist
+ schema:
+ $ref: '#/definitions/Problem7807'
+ default:
+ description: Unexpected error
+ schema:
+ $ref: '#/definitions/Problem7807'
+ delete:
+ tags:
+ - ComponentEndpoint
+ summary: Delete ComponentEndpoint with ID {xname}
+ description: Delete ComponentEndpoint for a specific xname.
+ operationId: doComponentEndpointDelete
+ parameters:
+ - name: xname
+ in: path
+ type: string
+ description: Locational xname of ComponentEndpoint record to delete.
+ required: true
+ responses:
+ "200":
+ description:
+ Zero (success) error code - ComponentEndpoint is deleted.
+ schema:
+ $ref: '#/definitions/Response_1.0.0'
+ "400":
+ description: Bad Request
+ schema:
+ $ref: '#/definitions/Problem7807'
+ "404":
+ description: XName does Not Exist - no matching ID to delete
+ schema:
+ $ref: '#/definitions/Problem7807'
+ default:
+ description: Unexpected error
+ schema:
+ $ref: '#/definitions/Problem7807'
+ # /Inventory/ComponentEndpoints/Query:
+ # post:
+ # tags:
+ # - ComponentEndpoint
+ # summary: >-
+ # POST ComponentEndpoint query, returning ComponentEndpointArray
+ # description: >-
+ # Given payload of locational xnames, retrieve the targeted entries
+ # in the form of a ComponentEndpointArray.
+ # operationId: doComponentEndpointQueryPost
+ # parameters:
+ # - name: payload
+ # in: body
+ # required: true
+ # schema:
+ # $ref: '#/definitions/ComponentEndpointArray_PostQuery'
+ # responses:
+ # "200":
+ # description: >-
+ # ComponentArray representing results of query.
+ # schema: + # $ref: + # '#/definitions/ComponentEndpointArray_ComponentEndpointArray' + # "400": + # description: Bad Request + # schema: + # $ref: '#/definitions/Problem7807' + # "404": + # description: Does Not Exist + # schema: + # $ref: '#/definitions/Problem7807' + # default: + # description: Unexpected error + # schema: + # $ref: '#/definitions/Problem7807' + # /Inventory/ComponentEndpoints/Query/{xname}: + # get: + # tags: + # - ComponentEndpoint + # summary: >- + # GET ComponentEndpoint query for {xname}, returning + # ComponentEndpointArray + # description: >- + # Given xname and modifiers in query string, retrieve zero or more + # ComponentEndpoint entries in the form of a ComponentEndpointArray. + # operationId: doComponentEndpointQueryGet + # parameters: + # - name: xname + # in: path + # type: string + # description: Locational xname of ComponentEndpoint to query. + # required: true + # - name: type + # in: query + # type: string + # description: >- + # Retrieve xname's children of type={type} instead of {xname}. + # - name: children + # in: query + # type: boolean + # description: >- + # Also return children of the selected ComponentEndpoints. + # - name: parents + # in: query + # type: boolean + # description: >- + # Also return parents of the selected ComponentEndpoints. + # - name: partition + # in: query + # type: string + # description: >- + # Restrict search to the given hard:soft partition. + # responses: + # "200": + # description: >- + # ComponentEndpointArray representing results of query. + # schema: + # $ref: + # '#/definitions/ComponentEndpointArray_ComponentEndpointArray' + # "400": + # description: Bad Request + # schema: + # $ref: '#/definitions/Problem7807' + # "404": + # description: Does Not Exist + # schema: + # $ref: '#/definitions/Problem7807' + # default: + # description: Unexpected error + # schema: + # $ref: '#/definitions/Problem7807' + ######################################################################## + # + # ServiceEndpoint API - ServiceEndpoints discovered under Redfish EP + # + ######################################################################## + /Inventory/ServiceEndpoints: + get: + tags: + - ServiceEndpoint + summary: Retrieve ServiceEndpoints Collection + description: >- + Retrieve the full collection of ServiceEndpoints in the form of a + ServiceEndpointArray. Full results can also be filtered by query + parameters. Only the first filter parameter of each type is + used and the parameters are applied in an AND fashion. + If the collection is empty or the filters have no match, an + empty array is returned. + operationId: doServiceEndpointsGetAll + parameters: + - name: redfish_ep + in: query + type: string + description: >- + Retrieve all ServiceEndpoints managed by the parent Redfish EP. + Can be repeated to select groups of endpoints. + - name: service + in: query + type: string + description: >- + Retrieve all ServiceEndpoints of the given Redfish service. + # Not implemented. + # - name: partition + # in: query + # type: string + # description: >- + # Restrict search to the given hard.soft partition. + responses: + "200": + description: >- + ServiceEndpointArray representing the ServiceEndpoint + collection or a filtered subset thereof. 
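+ # Usage sketch (values match the example below):
+ # GET /hsm/v2/Inventory/ServiceEndpoints?service=UpdateService&redfish_ep=x0c0s0b0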
+ schema: + $ref: '#/definitions/ServiceEndpointArray_ServiceEndpointArray' + examples: + application/json: + ServiceEndpoints: + RedfishEndpointID: x0c0s0b0 + RedfishType: UpdateService + RedfishSubtype: Other + UUID: bf9362ad-b29c-40ed-9881-18a5dba3a26b + OdataID: "/redfish/v1/UpdateService" + RedfishEndpointFQDN: "x0c0s0b0.mgmt.example.domain.com" + RedfishURL: + "x0c0s0b0.mgmt.example.domain.com/redfish/v1/UpdateService" + ServiceInfo: + '@odata.context': + "/redfish/v1/$metadata#UpdateService.UpdateService" + '@odata.id': "/redfish/v1/UpdateService" + '@odata.type': "#UpdateService.v1_1_0.UpdateService" + ID: UpdateService + Name: Update Service + Actions: + '#UpdateService.SimpleUpdate': + target: + "/redfish/v1/Systems/System.Embedded.1/Actions\ + /ComputerSystem.Reset" + title: "" + FirmwareInventory: + "@odata.id": "/redfish/v1/UpdateService/FirmwareInventory" + SoftwareInventory: + "@odata.id": "/redfish/v1/UpdateService/SoftwareInventory" + ServiceEnabled: "True" + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - ServiceEndpoint + - cli_danger$This will delete all service endpoints, continue? + summary: >- + Delete all ServiceEndpoints + description: >- + Delete all entries in the ServiceEndpoint collection. + operationId: doServiceEndpointsDeleteAll + responses: + "200": + description: >- + Zero (success) error code - one or more entries deleted. + Message contains count of deleted items. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist - Collection is empty + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /Inventory/ServiceEndpoints/{service}: + get: + tags: + - ServiceEndpoint + summary: Retrieve all ServiceEndpoints of a {service} + description: >- + Retrieve all ServiceEndpoint records for the Redfish service. + operationId: doServiceEndpointsGet + parameters: + - name: service + in: path + type: string + description: >- + The Redfish service type of the ServiceEndpoint records to return. + required: true + - name: redfish_ep + in: query + type: string + description: >- + Retrieve all ServiceEndpoints of type {service} managed by + the parent Redfish EP. Can be repeated to select groups of + endpoints. + responses: + "200": + description: >- + ServiceEndpointArray representing the subset of the ServiceEndpoint + collection filtered by {service} or additionally filtered + thereof. 
+ schema: + $ref: '#/definitions/ServiceEndpointArray_ServiceEndpointArray' + examples: + application/json: + ServiceEndpoints: + RedfishEndpointID: x0c0s0b0 + RedfishType: UpdateService + RedfishSubtype: Other + UUID: bf9362ad-b29c-40ed-9881-18a5dba3a26b + OdataID: "/redfish/v1/UpdateService" + RedfishEndpointFQDN: "x0c0s0b0.mgmt.example.domain.com" + RedfishURL: + "x0c0s0b0.mgmt.example.domain.com/redfish/v1/UpdateService" + ServiceInfo: + '@odata.context': + "/redfish/v1/$metadata#UpdateService.UpdateService" + '@odata.id': "/redfish/v1/UpdateService" + '@odata.type': "#UpdateService.v1_1_0.UpdateService" + ID: UpdateService + Name: Update Service + Actions: + '#UpdateService.SimpleUpdate': + target: + "/redfish/v1/Systems/System.Embedded.1/Actions\ + /ComputerSystem.Reset" + title: "" + FirmwareInventory: + "@odata.id": "/redfish/v1/UpdateService/FirmwareInventory" + SoftwareInventory: + "@odata.id": "/redfish/v1/UpdateService/SoftwareInventory" + ServiceEnabled: "True" + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist - Service does not exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /Inventory/ServiceEndpoints/{service}/RedfishEndpoints/{xname}: + get: + tags: + - ServiceEndpoint + summary: Retrieve the ServiceEndpoint of a {service} managed by {xname} + description: Retrieve the ServiceEndpoint for a Redfish service that is managed by xname. + operationId: doServiceEndpointGet + parameters: + - name: service + in: path + type: string + description: >- + The Redfish service type of the ServiceEndpoint record to return. + required: true + - name: xname + in: path + type: string + description: >- + The locational xname of the RedfishEndpoint that manages the + ServiceEndpoint record to return. 
+ required: true
+ responses:
+ "200":
+ description: >-
+ ServiceEndpoint entry matching {service}/{xname}
+ schema:
+ $ref: '#/definitions/ServiceEndpoint.1.0.0_ServiceEndpoint'
+ examples:
+ application/json:
+ RedfishEndpointID: x0c0s0b0
+ RedfishType: UpdateService
+ RedfishSubtype: Other
+ UUID: bf9362ad-b29c-40ed-9881-18a5dba3a26b
+ OdataID: "/redfish/v1/UpdateService"
+ RedfishEndpointFQDN: "x0c0s0b0.mgmt.example.domain.com"
+ RedfishURL:
+ "x0c0s0b0.mgmt.example.domain.com/redfish/v1/UpdateService"
+ ServiceInfo:
+ '@odata.context':
+ "/redfish/v1/$metadata#UpdateService.UpdateService"
+ '@odata.id': "/redfish/v1/UpdateService"
+ '@odata.type': "#UpdateService.v1_1_0.UpdateService"
+ ID: UpdateService
+ Name: Update Service
+ Actions:
+ '#UpdateService.SimpleUpdate':
+ target:
+ "/redfish/v1/Systems/System.Embedded.1/Actions\
+ /ComputerSystem.Reset"
+ title: ""
+ FirmwareInventory:
+ "@odata.id": "/redfish/v1/UpdateService/FirmwareInventory"
+ SoftwareInventory:
+ "@odata.id": "/redfish/v1/UpdateService/SoftwareInventory"
+ ServiceEnabled: "True"
+ "400":
+ description: Bad Request
+ schema:
+ $ref: '#/definitions/Problem7807'
+ "404":
+ description: Does Not Exist
+ schema:
+ $ref: '#/definitions/Problem7807'
+ default:
+ description: Unexpected error
+ schema:
+ $ref: '#/definitions/Problem7807'
+ delete:
+ tags:
+ - ServiceEndpoint
+ summary: Delete the {service} ServiceEndpoint managed by {xname}
+ description: Delete the {service} ServiceEndpoint managed by {xname}.
+ operationId: doServiceEndpointDelete
+ parameters:
+ - name: service
+ in: path
+ type: string
+ description: >-
+ The Redfish service type of the ServiceEndpoint record to delete.
+ required: true
+ - name: xname
+ in: path
+ type: string
+ description: >-
+ The locational xname of the RedfishEndpoint that manages the
+ ServiceEndpoint record to delete.
+ required: true
+ responses:
+ "200":
+ description: Zero (success) error code - ServiceEndpoint is deleted.
+ schema:
+ $ref: '#/definitions/Response_1.0.0'
+ "400":
+ description: Bad Request
+ schema:
+ $ref: '#/definitions/Problem7807'
+ "404":
+ description: Does Not Exist - no matching ServiceEndpoint to delete
+ schema:
+ $ref: '#/definitions/Problem7807'
+ default:
+ description: Unexpected error
+ schema:
+ $ref: '#/definitions/Problem7807'
+ ########################################################################
+ #
+ # Component Ethernet Interfaces V2 API
+ #
+ ########################################################################
+ /Inventory/EthernetInterfaces:
+ get:
+ tags:
+ - ComponentEthernetInterfaces
+ summary: >-
+ GET ALL existing component Ethernet interfaces
+ description: >-
+ Get all component Ethernet interfaces that currently exist, optionally filtering
+ the set, returning an array of component Ethernet interfaces.
+ operationId: doCompEthInterfacesGetV2
+ parameters:
+ - name: MACAddress
+ in: query
+ type: string
+ description: >-
+ Retrieve the component Ethernet interface with the provided MAC address. Can be
+ repeated to select multiple component Ethernet interfaces.
+ - name: IPAddress
+ in: query
+ type: string
+ description: >-
+ Retrieve the component Ethernet interface with the provided IP address. Can be
+ repeated to select multiple component Ethernet interfaces. A blank string will
+ retrieve component Ethernet interfaces that have no IP address.
+ - name: Network
+ in: query
+ type: string
+ description: >-
+ Retrieve the component Ethernet interface with an IP address on the provided
+ network.
Can be repeated to select multiple component Ethernet interfaces. A blank
+ string will retrieve component Ethernet interfaces that have an IP address with no
+ network.
+ - name: ComponentID
+ in: query
+ type: string
+ description: >-
+ Retrieve all component Ethernet interfaces with the provided component ID. Can be
+ repeated to select multiple component Ethernet interfaces.
+ - name: Type
+ in: query
+ type: string
+ description: >-
+ Retrieve all component Ethernet interfaces with the provided parent HMS type. Can be
+ repeated to select multiple component Ethernet interfaces.
+ - name: OlderThan
+ in: query
+ type: string
+ description: >-
+ Retrieve all component Ethernet interfaces that were last updated before the
+ specified time. This takes an RFC3339 formatted string (2006-01-02T15:04:05Z07:00).
+ - name: NewerThan
+ in: query
+ type: string
+ description: >-
+ Retrieve all component Ethernet interfaces that were last updated after the
+ specified time. This takes an RFC3339 formatted string (2006-01-02T15:04:05Z07:00).
+ responses:
+ "200":
+ description: >-
+ An array containing all existing component Ethernet interface objects.
+ schema:
+ type: array
+ items:
+ $ref: '#/definitions/CompEthInterface.1.0.0'
+ "400":
+ description: Bad Request
+ schema:
+ $ref: '#/definitions/Problem7807'
+ default:
+ description: Unexpected error
+ schema:
+ $ref: '#/definitions/Problem7807'
+ post:
+ tags:
+ - ComponentEthernetInterfaces
+ summary: CREATE a new component Ethernet interface (via POST)
+ description: >-
+ Create a new component Ethernet interface.
+ operationId: doCompEthInterfacePostV2
+ parameters:
+ - name: payload
+ in: body
+ required: true
+ schema:
+ $ref: '#/definitions/CompEthInterface.1.0.0'
+ responses:
+ "201":
+ description: >-
+ Success, returns array containing the created resource URI.
+ schema:
+ $ref: '#/definitions/ResourceURI.1.0.0'
+ examples:
+ application/json:
+ uri: /hsm/v2/Inventory/EthernetInterfaces/a4bf012b7311
+ "400":
+ description: Bad Request
+ schema:
+ $ref: '#/definitions/Problem7807'
+ "409":
+ description: Conflict. Duplicate component Ethernet interface would be created.
+ schema:
+ $ref: '#/definitions/Problem7807'
+ default:
+ description: Unexpected error
+ schema:
+ $ref: '#/definitions/Problem7807'
+ delete:
+ tags:
+ - ComponentEthernetInterfaces
+ - cli_danger$This will delete all component Ethernet interfaces, continue?
+ summary: >-
+ Clear the component Ethernet interface collection.
+ description: >-
+ Delete all component Ethernet interface entries.
+ operationId: doCompEthInterfaceDeleteAllV2
+ responses:
+ "200":
+ description: >-
+ Zero (success) response code - one or more entries deleted.
+ Message contains count of deleted items.
+ schema:
+ $ref: '#/definitions/Response_1.0.0'
+ "400":
+ description: Bad Request
+ schema:
+ $ref: '#/definitions/Problem7807'
+ "404":
+ description: Does Not Exist - Collection is empty
+ schema:
+ $ref: '#/definitions/Problem7807'
+ default:
+ description: Unexpected error
+ schema:
+ $ref: '#/definitions/Problem7807'
+ /Inventory/EthernetInterfaces/{ethInterfaceID}:
+ get:
+ tags:
+ - ComponentEthernetInterfaces
+ summary: GET existing component Ethernet interface {ethInterfaceID}
+ description: >-
+ Retrieve the component Ethernet interface which was created with the given {ethInterfaceID}.
+ operationId: doCompEthInterfaceGetV2
+ parameters:
+ - name: ethInterfaceID
+ in: path
+ type: string
+ description: The ID of the component Ethernet interface to return.
+ required: true + responses: + "200": + description: Component Ethernet interface entry identified by {ethInterfaceID}, if it exists. + schema: + $ref: '#/definitions/CompEthInterface.1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - ComponentEthernetInterfaces + summary: DELETE existing component Ethernet interface with {ethInterfaceID} + description: >- + Delete the given component Ethernet interface with {ethInterfaceID}. + operationId: doCompEthInterfaceDeleteV2 + parameters: + - name: ethInterfaceID + in: path + type: string + description: The ID of the component Ethernet interface to delete. + required: true + responses: + "200": + description: Zero (success) error code - component Ethernet interface is deleted. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist - No component Ethernet interface with ethInterfaceID. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + patch: + tags: + - ComponentEthernetInterfaces + summary: UPDATE metadata for existing component Ethernet interface {ethInterfaceID} + description: >- + To update the IP address, CompID, and/or description of a component Ethernet interface, + a PATCH operation can be used. Omitted fields are not updated. + The 'LastUpdate' field will be updated if an IP address is provided. + operationId: doCompEthInterfacePatchV2 + parameters: + - name: ethInterfaceID + in: path + type: string + description: >- + The ID of the component Ethernet interface to update. + required: true + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/CompEthInterface.1.0.0_Patch' + responses: + "200": + description: Success + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: The component Ethernet interface with this ethInterfaceID does not exist. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /Inventory/EthernetInterfaces/{ethInterfaceID}/IPAddresses: + get: + tags: + - ComponentEthernetInterfaces + summary: >- + Retrieve all IP addresses of a component Ethernet interface {ethInterfaceID} + description: >- + Retrieve all IP addresses of a component Ethernet interface {ethInterfaceID} + operationId: doCompEthInterfaceIPAddressesGetV2 + parameters: + - name: ethInterfaceID + in: path + type: string + description: The ID of the component Ethernet interface to retrieve the IP addresses of. + required: true + responses: + "200": + description: IP addresses of the component Ethernet interface entry identified by {ethInterfaceID}, if it exists. 
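+ # Usage sketch (interface ID is illustrative, reusing the POST example
+ # above):
+ # GET /hsm/v2/Inventory/EthernetInterfaces/a4bf012b7311/IPAddresses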
+ schema:
+ type: array
+ items:
+ $ref: '#/definitions/CompEthInterface.1.0.0_IPAddressMapping'
+ "400":
+ description: Bad Request
+ schema:
+ $ref: '#/definitions/Problem7807'
+ "404":
+ description: Does Not Exist
+ schema:
+ $ref: '#/definitions/Problem7807'
+ default:
+ description: Unexpected error
+ schema:
+ $ref: '#/definitions/Problem7807'
+ post:
+ tags:
+ - ComponentEthernetInterfaces
+ summary: CREATE a new IP address mapping in a component Ethernet interface (via POST)
+ description: >-
+ Create a new IP address mapping in a component Ethernet interface {ethInterfaceID}.
+ operationId: doCompEthInterfaceIPAddressesPostV2
+ parameters:
+ - name: ethInterfaceID
+ in: path
+ type: string
+ description: The ID of the component Ethernet interface to add the IP address to.
+ required: true
+ - name: payload
+ in: body
+ required: true
+ schema:
+ $ref: '#/definitions/CompEthInterface.1.0.0_IPAddressMapping'
+ responses:
+ "201":
+ description: >-
+ Success, returns the created resource URI.
+ schema:
+ $ref: '#/definitions/ResourceURI.1.0.0'
+ "400":
+ description: Bad Request
+ schema:
+ $ref: '#/definitions/Problem7807'
+ "409":
+ description: Conflict. Duplicate IP address in component Ethernet interface would be created.
+ schema:
+ $ref: '#/definitions/Problem7807'
+ default:
+ description: Unexpected error
+ schema:
+ $ref: '#/definitions/Problem7807'
+ /Inventory/EthernetInterfaces/{ethInterfaceID}/IPAddresses/{ipAddress}:
+ patch:
+ tags:
+ - ComponentEthernetInterfaces
+ summary: >-
+ UPDATE metadata for existing IP address {ipAddress} in a component Ethernet interface {ethInterfaceID}
+ description: >-
+ To update the network of an IP address in a component Ethernet interface,
+ a PATCH operation can be used. Omitted fields are not updated.
+ The 'LastUpdate' field of the component Ethernet interface will be updated.
+ operationId: doCompEthInterfaceIPAddressPatchV2
+ parameters:
+ - name: ethInterfaceID
+ in: path
+ type: string
+ description: The ID of the component Ethernet interface with the IP address to patch.
+ required: true
+ - name: ipAddress
+ in: path
+ type: string
+ description: The IP address to patch in the component Ethernet interface.
+ required: true
+ - name: payload
+ in: body
+ required: true
+ schema:
+ $ref: '#/definitions/CompEthInterface.1.0.0_IPAddressMapping_Patch'
+ responses:
+ "200":
+ description: Success
+ "400":
+ description: Bad Request
+ schema:
+ $ref: '#/definitions/Problem7807'
+ "404":
+ description: Does Not Exist - No IP address with ipAddress exists on the specified component Ethernet interface.
+ schema:
+ $ref: '#/definitions/Problem7807'
+ default:
+ description: Unexpected error
+ schema:
+ $ref: '#/definitions/Problem7807'
+ delete:
+ tags:
+ - ComponentEthernetInterfaces
+ summary: DELETE existing IP address mapping with {ipAddress} from a component Ethernet interface with {ethInterfaceID}
+ description: >-
+ Delete the given IP address mapping with {ipAddress} from a component Ethernet interface with {ethInterfaceID}.
+ The 'LastUpdate' field of the component Ethernet interface will be updated.
+ operationId: doCompEthInterfaceIPAddressDeleteV2
+ parameters:
+ - name: ethInterfaceID
+ in: path
+ type: string
+ description: The ID of the component Ethernet interface to delete the IP address from.
+ required: true
+ - name: ipAddress
+ in: path
+ type: string
+ description: The IP address to delete from the component Ethernet interface.
+ required: true + responses: + "200": + description: Zero (success) error code - IP address mapping is deleted. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist - No IP address with ipAddress exists on the specified component Ethernet interface. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + ######################################################################## + # + # Discovery API Calls - Discover action and DiscoveryStatus + # + ######################################################################## + /Inventory/DiscoveryStatus: + get: + tags: + - DiscoveryStatus + summary: >- + Retrieve all DiscoveryStatus entries in collection + description: >- + Retrieve all DiscoveryStatus entries as an unnamed array. + operationId: doDiscoveryStatusGetAll + responses: + "200": + description: >- + Unnamed DiscoveryStatus array representing all entries + in collection. + schema: + type: array + items: + $ref: '#/definitions/DiscoveryStatus.1.0.0_DiscoveryStatus' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /Inventory/DiscoveryStatus/{id}: + get: + tags: + - DiscoveryStatus + summary: >- + Retrieve DiscoveryStatus entry matching {id} + description: >- + Retrieve DiscoveryStatus entry with the specific ID. + operationId: doDiscoveryStatusGet + parameters: + - name: id + in: path + type: number + format: int32 + description: Positive integer ID of DiscoveryStatus entry to retrieve + required: true + responses: + "200": + description: >- + Success. Returns matching DiscoveryStatus entry. + schema: + $ref: '#/definitions/DiscoveryStatus.1.0.0_DiscoveryStatus' + "400": + description: Bad Request, e.g. not a positive integer + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Not found (no such ID) + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /Inventory/Discover: + post: + tags: + - Discover + summary: Create Discover operation request + description: >- + Discover and populate the database with component data (ComponentEndpoints, + HMS Components, HWInventory) based on interrogating RedfishEndpoint + entries. If not all RedfishEndpoints should be discovered, an + array of xnames can be provided in the DiscoverInput payload. + operationId: doInventoryDiscoverPost + parameters: + - name: payload + in: body + required: false + schema: + $ref: '#/definitions/Discover.1.0.0_DiscoverInput' + responses: + "200": + description: >- + Success, discovery started. DiscoverStatus link(s) to check in + returned URI array. + schema: + type: array + items: + $ref: '#/definitions/ResourceURI.1.0.0' + examples: + application/json: + - URI: /hsm/v2/Inventory/DiscoveryStatus/0 + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: >- + One or more requested RedfishEndpoint xname IDs were not found. + schema: + $ref: '#/definitions/Problem7807' + "409": + description: >- + Conflict. One or more DiscoveryStatus objects is InProgress + or Pending and prevents this operation from starting. Try again + later or use the force option (should never be needed unless some + kind of problem has occurred). Simultaneous discoveries could + cause one or both to fail.
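+        # Illustrative sketch (comments only): a minimal Discover request, assuming
+        # Discover.1.0.0_DiscoverInput carries an 'xnames' array and a boolean
+        # 'force' flag, as the description and 409 text above suggest (field names
+        # hedged; see the definition for the authoritative schema).
+        #   POST /hsm/v2/Inventory/Discover
+        #   { "xnames": [ "x0c0s0b0" ], "force": false }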
+ schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + ########################################################################## + # + # Node State Change Notification API - Subscribe to receive node SCNs from HSM + # + ########################################################################## + /Subscriptions/SCN: + post: + tags: + - SCN + - cli_ignore + summary: Create a subscription for state change notifications + description: >- + Request a subscription for state change notifications for a set of + component states. This will create a new subscription and produce + a unique ID for the subscription. This will not affect the existing + subscriptions. + operationId: doPostSCNSubscription + consumes: + - application/json + produces: + - application/json + parameters: + - in: body + name: payload + required: true + schema: + $ref: '#/definitions/Subscriptions_SCNPostSubscription' + responses: + "200": + description: >- + A new subscription was created. The subscription ID is + included in the response. + schema: + $ref: '#/definitions/Subscriptions_SCNSubscriptionArrayItem.1.0.0' + "400": + description: + Bad Request. Malformed JSON. Verify all JSON formatting in payload. + schema: + $ref: '#/definitions/Problem7807' + "409": + description: >- + The subscription already exists for the specified subscriber + and URL. + schema: + $ref: '#/definitions/Problem7807' + "500": + description: Database error. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - SCN + - cli_ignore + summary: Delete all state change notification subscriptions + description: >- + Delete all subscriptions. + operationId: doDeleteSCNSubscriptionsAll + responses: + "200": + description: Success. Subscriptions deleted successfully. + "500": + description: Database error. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + get: + tags: + - SCN + - cli_ignore + summary: Retrieve currently-held state change notification subscriptions + description: >- + Retrieve all information on currently held state change notification + subscriptions. + operationId: doGetSCNSubscriptionsAll + produces: + - application/json + responses: + "200": + description: Success. Currently held subscriptions are returned. + schema: + $ref: '#/definitions/Subscriptions_SCNSubscriptionArray' + "500": + description: Database error. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /Subscriptions/SCN/{id}: + put: + tags: + - SCN + - cli_ignore + summary: Update a subscription for state change notifications + description: >- + Update an existing state change notification subscription in whole. + This will overwrite the specified subscription. + operationId: doPutSCNSubscription + consumes: + - application/json + parameters: + - name: id + in: path + type: string + description: >- + This is the ID associated with the subscription that was generated + at its creation. + required: true + - in: body + name: payload + required: true + schema: + $ref: '#/definitions/Subscriptions_SCNPostSubscription' + responses: + "204": + description: Success. The subscription has been overwritten. + "400": + description: >- + Bad Request. Malformed JSON. Verify all JSON formatting in payload. 
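+        # Illustrative sketch (comments only): a minimal SCN subscription, assuming
+        # Subscriptions_SCNPostSubscription carries 'Subscriber', 'States', and
+        # 'Url' fields (hypothetical names and placeholder values; see the
+        # definition for the real schema).
+        #   POST /hsm/v2/Subscriptions/SCN
+        #   { "Subscriber": "scnfd@sms01", "States": [ "Ready" ],
+        #     "Url": "https://sms01.example.com/scn" }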
+ schema: + $ref: '#/definitions/Problem7807' + "404": + description: The subscription does not exist. + schema: + $ref: '#/definitions/Problem7807' + "500": + description: Database error. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + patch: + tags: + - SCN + - cli_ignore + summary: Update a subscription for state change notifications + description: >- + Update a subscription for state change notifications to add or remove triggers. + operationId: doPatchSCNSubscription + consumes: + - application/json + parameters: + - name: id + in: path + type: string + description: >- + This is the ID associated with the subscription that was generated + at its creation. + required: true + - in: body + name: payload + required: true + schema: + $ref: '#/definitions/Subscriptions_SCNPatchSubscription' + responses: + "204": + description: Success. + "400": + description: + Bad Request. Malformed JSON. Verify all JSON formatting in payload. + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + "500": + description: Internal server error. Database error. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - SCN + - cli_ignore + summary: Delete a state change notification subscription + description: >- + Delete a state change notification subscription. + operationId: doDeleteSCNSubscription + parameters: + - name: id + in: path + type: string + description: >- + This is the ID associated with the subscription that was generated + at its creation. + required: true + responses: + "200": + description: Success. Subscription deleted successfully. + "400": + description: Bad Request. + schema: + $ref: '#/definitions/Problem7807' + "500": + description: Database error. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + get: + tags: + - SCN + - cli_ignore + summary: Retrieve a currently-held state change notification subscription + description: >- + Return the information on a currently held state change notification + subscription + operationId: doGetSCNSubscription + produces: + - application/json + parameters: + - name: id + in: path + type: string + description: >- + This is the ID associated with the subscription that was generated + at its creation. + required: true + responses: + "200": + description: Success. A currently held subscription is returned. + schema: + $ref: '#/definitions/Subscriptions_SCNPostSubscription' + "500": + description: Database error. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + ######################################################################## + # + # Group API Calls + # + ######################################################################## + /groups: + get: + tags: + - Group + summary: >- + Retrieve all existing groups + description: >- + Retrieve all groups that currently exist, optionally filtering + the set, returning an array of groups. + operationId: doGroupsGet + parameters: + - name: group + in: query + type: string + description: >- + Retrieve the group with the provided group label. Can be + repeated to select multiple groups. 
+ - name: tag + in: query + type: string + description: >- + Retrieve all groups associated with the given free-form tag from + the tags field. + responses: + "200": + description: >- + Groups array containing all existing group objects. + schema: + type: array + items: + $ref: '#/definitions/Group.1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + post: + tags: + - Group + summary: Create a new group + description: >- + Create a new group identified by the group_label field. + Label should be given explicitly, and should not conflict with + any existing group, or an error will occur. + + + Note that if the exclusiveGroup field is present, the group is not allowed to add + a member that exists under a different group/label where the exclusiveGroup + field is the same. This can be used to create groups of groups + where a component may only be present in one of the set. + operationId: doGroupsPost + parameters: + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/Group.1.0.0' + responses: + "201": + description: >- + Success, returns array containing the created resource URI. + schema: + type: array + items: + $ref: '#/definitions/ResourceURI.1.0.0' + examples: + application/json: + - uri: /hsm/v2/groups/mygrouplabel + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "409": + description: Conflict. Duplicate resource would be created. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /groups/{group_label}: + get: + tags: + - Group + summary: Retrieve existing group {group_label} + description: >- + Retrieve the group which was created with the given {group_label}. + operationId: doGroupGet + parameters: + - name: group_label + in: path + type: string + description: Label name of the group to return. + required: true + - name: partition + in: query + type: string + description: >- + AND the members set by the given partition name (p#.#). NULL will + return the group members not in ANY partition. + responses: + "200": + description: Group entry identified by {group_label}, if it exists. + schema: + $ref: '#/definitions/Group.1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - Group + summary: Delete existing group with {group_label} + description: >- + Delete the given group with {group_label}. Any members previously in + the group will no longer have the deleted group label associated with + them. + operationId: doGroupDelete + parameters: + - name: group_label + in: path + type: string + description: Label (i.e. name) of the group to delete. + required: true + responses: + "200": + description: Zero (success) error code - component is deleted. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist - No group matches label. 
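+        # Illustrative sketch (comments only): a group-creation body for the
+        # POST /groups operation above, assuming Group.1.0.0 uses 'label', 'tags',
+        # 'exclusiveGroup', and 'members.ids' fields (hypothetical names; 'colors'
+        # and 'rack1' are placeholder values).
+        #   POST /hsm/v2/groups
+        #   { "label": "mygrouplabel", "tags": [ "rack1" ],
+        #     "exclusiveGroup": "colors", "members": { "ids": [ "x0c0s1b0n0" ] } }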
+ schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + patch: + tags: + - Group + summary: Update metadata for existing group {group_label} + description: >- + To update the tags array and/or description, a PATCH operation can + be used. Omitted fields are not updated. This cannot be + used to completely replace the members list. Rather, individual + members can be removed or added with the POST/DELETE + {group_label}/members API below. + operationId: doGroupPatch + parameters: + - name: group_label + in: path + type: string + description: >- + Label (i.e. name) of the group to update. + required: true + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/Group.1.0.0_Patch' + responses: + "204": + description: Success + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: The group with this label did not exist. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /groups/labels: + get: + tags: + - Group + summary: >- + Retrieve all existing group labels + description: >- + Retrieve a string array of all group labels (i.e. group names) that + currently exist in HSM. + operationId: doGroupLabelsGet + responses: + "200": + description: >- + Array of group labels which form the names of all existing groups, + or an empty array if none currently exist. + schema: + type: array + items: + type: string + examples: + application/json: + - blue + - green + - red + - compute_a + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + ########################################################################## + # Group Members + ########################################################################## + /groups/{group_label}/members: + get: + tags: + - Group + summary: >- + Retrieve all members of existing group + description: >- + Retrieve members of an existing group {group_label}, optionally + filtering the set, returning a members set containing the component + xname IDs. + operationId: doGroupMembersGet + parameters: + - name: group_label + in: path + type: string + required: true + description: >- + Specifies an existing group {group_label} to query the members of. + - name: partition + in: query + type: string + description: >- + AND the members set by the given partition name (p#.#). NULL will + return the group members not in ANY partition. + responses: + "200": + description: >- + Members set including component xname IDs which are members + of group {group_label}. If none exist, an empty array will be + returned. + schema: + $ref: '#/definitions/Members.1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does not exist - No such group {group_label} + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + post: + tags: + - Group + summary: Create new member of existing group (via POST) + description: >- + Create a new member of group {group_label} with the component + xname ID provided in the payload. + New member should not already exist in the given group.
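+        # Illustrative sketch (comments only): adding a member via the POST above,
+        # assuming the MemberID payload is just an 'id' xname (hedged; see the
+        # MemberID definition).
+        #   POST /hsm/v2/groups/mygrouplabel/members
+        #   { "id": "x0c0s1b0n0" }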
+ operationId: doGroupMembersPost + parameters: + - name: group_label + in: path + type: string + required: true + description: >- + Specifies an existing group {group_label} to add the new member to. + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/MemberID' + responses: + "201": + description: >- + Success, returns array containing the created member URI. + schema: + type: array + items: + $ref: '#/definitions/ResourceURI.1.0.0' + examples: + application/json: + - uri: /hsm/v2/groups/mygrouplabel/members/x0c0s1b0n0 + "400": + description: Bad Request - e.g. malformed string + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does not exist - No such group {group_label} + schema: + $ref: '#/definitions/Problem7807' + "409": + description: Conflict. Duplicate resource would be created. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /groups/{group_label}/members/{xname_id}: + delete: + tags: + - Group + summary: >- + Delete member from existing group + description: >- + Delete component {xname_id} from the members of group {group_label}. + operationId: doGroupMemberDelete + parameters: + - name: group_label + in: path + type: string + required: true + description: >- + Specifies an existing group {group_label} to remove the member from. + - name: xname_id + in: path + type: string + required: true + description: >- + Member of {group_label} to remove. + responses: + "200": + description: >- + Zero (success) error code - entry deleted. + Message contains count of deleted items (should always be one). + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request, malformed group label or component xname_id + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist - no such member or group. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + ######################################################################## + # + # Partition API Calls + # + ######################################################################## + /partitions: + get: + tags: + - Partition + summary: >- + Retrieve all existing partitions + description: >- + Retrieve all partitions that currently exist, optionally filtering + the set, returning an array of partition records. + operationId: doPartitionsGet + parameters: + - name: partition + in: query + type: string + description: >- + Retrieve the partition with the provided partition name (p#.#). Can be + repeated to select multiple partitions. + - name: tag + in: query + type: string + description: >- + Retrieve all partitions associated with the given free-form tag + from the tags field. + responses: + "200": + description: >- + Array containing all existing partition objects. + schema: + type: array + items: + $ref: '#/definitions/Partition.1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + post: + tags: + - Partition + summary: Create new partition (via POST) + description: >- + Create a new partition identified by the partition_name field. Partition + names should be of the format p# or p#.# (hard_part.soft_part). + Partition name should be given explicitly, and should not conflict with + any existing partition, or an error will occur. 
In addition, the + member list must not overlap with any existing partition. + operationId: doPartitionsPost + parameters: + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/Partition.1.0.0' + responses: + "201": + description: >- + Success, returns array containing the created resource URI. + schema: + type: array + items: + $ref: '#/definitions/ResourceURI.1.0.0' + examples: + application/json: + - uri: /hsm/v2/partitions/p1 + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "409": + description: Conflict. Duplicate resource would be created. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /partitions/{partition_name}: + get: + tags: + - Partition + summary: Retrieve existing partition {partition_name} + description: >- + Retrieve the partition which was created with the given + {partition_name}. + operationId: doPartitionGet + parameters: + - name: partition_name + in: path + type: string + description: Partition name to be retrieved + required: true + responses: + "200": + description: + Partition entry identified by {partition_name}, if it exists. + schema: + $ref: '#/definitions/Partition.1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - Partition + summary: Delete existing partition with {partition_name} + description: >- + Delete partition {partition_name}. Any members previously in the + partition will no longer have the deleted partition name associated + with them. + operationId: doPartitionDelete + parameters: + - name: partition_name + in: path + type: string + description: Partition name of the partition to delete. + required: true + responses: + "200": + description: Zero (success) error code - component is deleted. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist - No partition matches partition_name. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + patch: + tags: + - Partition + summary: Update metadata for existing partition {partition_name} + description: >- + Update the tags array and/or description by using PATCH. + Omitted fields are not updated. This cannot be used + to completely replace the members list. Rather, individual members + can be removed or added with the POST/DELETE {partition_name}/members + API. + operationId: doPartitionPatch + parameters: + - name: partition_name + in: path + type: string + description: >- + Name of the partition to update. + required: true + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/Partition.1.0.0_Patch' + responses: + "204": + description: Success + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: The partition with this partition_name did not exist. 
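+        # Illustrative sketch (comments only): a partition-creation body for the
+        # POST /partitions operation above, assuming Partition.1.0.0 uses 'name'
+        # and 'members.ids' fields (hypothetical names; values are placeholders).
+        #   POST /hsm/v2/partitions
+        #   { "name": "p1", "members": { "ids": [ "x0c0s1b0n0" ] } }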
+ schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /partitions/names: + get: + tags: + - Partition + summary: >- + Retrieve all existing partition names + description: >- + Retrieve a string array of all partition names that currently exist in HSM. + These are just the names, not the complete partition records. + operationId: doPartitionNamesGet + responses: + "200": + description: >- + Array of partition names comprising all partitions known to HSM + at the present time, or an empty array if none currently exist. + schema: + type: array + items: + type: string + examples: + application/json: + - p1 + - p2 + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + ########################################################################## + # Partition Members + ########################################################################## + /partitions/{partition_name}/members: + get: + tags: + - Partition + summary: >- + Retrieve all members of existing partition + description: >- + Retrieve all members of existing partition {partition_name}, optionally + filtering the set, returning a members set that includes the component + xname IDs. + operationId: doPartitionMembersGet + parameters: + - name: partition_name + in: path + type: string + required: true + description: >- + Existing partition {partition_name} to query the members of. + responses: + "200": + description: >- + Members set including component xname IDs which are members + of partition {partition_name}. If none exist, an empty array will + be returned. + schema: + $ref: '#/definitions/Members.1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does not exist - No such partition {partition_name} + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + post: + tags: + - Partition + summary: Create new member of existing partition (via POST) + description: >- + Create a new member of partition {partition_name} with the component + xname ID provided in the payload. + New member should not already exist in the given partition. + operationId: doPartitionMembersPost + parameters: + - name: partition_name + in: path + type: string + required: true + description: >- + Existing partition {partition_name} to add the new member to. + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/MemberID' + responses: + "201": + description: >- + Success, returns array containing the created member URI. + schema: + type: array + items: + $ref: '#/definitions/ResourceURI.1.0.0' + examples: + application/json: + - uri: /hsm/v2/partitions/p1/members/x0c0s1b0n0 + "400": + description: Bad Request - bad partition_name or malformed string. + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does not exist - No such partition {partition_name} + schema: + $ref: '#/definitions/Problem7807' + "409": + description: Conflict. Duplicate resource would be created.
+ schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /partitions/{partition_name}/members/{xname_id}: + delete: + tags: + - Partition + summary: >- + Delete member from existing partition + description: >- + Delete component {xname_id} from the members of partition + {partition_name}. + operationId: doPartitionMemberDelete + parameters: + - name: partition_name + in: path + type: string + required: true + description: >- + Existing partition {partition_name} to remove the member from. + - name: xname_id + in: path + type: string + required: true + description: >- + Member of {partition_name} to remove. + responses: + "200": + description: >- + Zero (success) error code - entry deleted. + Message contains count of deleted items (should always be one). + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request, malformed partition_name or xname_id + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist - no such member or partition. + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + ######################################################################## + # + # Memberships (of Groups and Partitions) API Calls + # + ######################################################################## + /memberships: + get: + tags: + - Membership + summary: >- + Retrieve all memberships for components + description: >- + Display group labels and partition names for each component xname ID + (where applicable). + operationId: doMembershipsGet + parameters: + - $ref: '#/parameters/compIDParam' + - $ref: '#/parameters/compTypeParam' + - $ref: '#/parameters/compStateParam' + - $ref: '#/parameters/compFlagParam' + - $ref: '#/parameters/compRoleParam' + - $ref: '#/parameters/compSubroleParam' + - $ref: '#/parameters/compEnabledParam' + - $ref: '#/parameters/compSoftwareStatusParam' + - $ref: '#/parameters/compSubtypeParam' + - $ref: '#/parameters/compArchParam' + - $ref: '#/parameters/compClassParam' + - $ref: '#/parameters/compNIDParam' + - $ref: '#/parameters/compNIDStartParam' + - $ref: '#/parameters/compNIDEndParam' + - $ref: '#/parameters/compPartitionParam' + - $ref: '#/parameters/compGroupParam' + responses: + "200": + description: >- + Array mapping component xname IDs to their group and + partition memberships. + schema: + type: array + items: + $ref: '#/definitions/Membership.1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /memberships/{xname}: + get: + tags: + - Membership + summary: >- + Retrieve membership for component {xname} + description: >- + Display group labels and partition names for a given component xname ID. + operationId: doMembershipGet + parameters: + - name: xname + in: path + type: string + description: Component xname ID (i.e. locational identifier) + required: true + responses: + "200": + description: >- + Membership info for component at {xname} + schema: + $ref: '#/definitions/Membership.1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Not Found - no such xname.
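+        # Illustrative sketch (comments only): one possible response shape for the
+        # membership lookup above, assuming Membership.1.0.0 carries 'id',
+        # 'partitionName', and 'groupLabels' fields (hypothetical names).
+        #   GET /hsm/v2/memberships/x0c0s1b0n0
+        #   { "id": "x0c0s1b0n0", "partitionName": "p1", "groupLabels": [ "mygrouplabel" ] }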
+ schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + ######################################################################## + # + # Power Maps - Component to power supply mapping. + # + ######################################################################## + /sysinfo/powermaps: + get: + tags: + - PowerMap + summary: >- + Retrieve all PowerMaps, returning PowerMapArray + description: >- + Retrieve all power map entries as a named array, or an empty array if the + collection is empty. + operationId: doPowerMapsGet + responses: + "200": + description: >- + Named PowerMaps array. + schema: + $ref: '#/definitions/PowerMapArray_PowerMapArray' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + post: + tags: + - PowerMap + summary: Create or Modify PowerMaps + description: >- + Create or update the given set of PowerMaps whose ID fields are each a + valid xname. The poweredBy field is required. + operationId: doPowerMapsPost + parameters: + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/PowerMapArray_PowerMapArray' + responses: + "200": + description: >- + Zero (success) error code - one or more entries created or + updated. Message contains count of new/modified items. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - PowerMap + - cli_ignore + summary: >- + Delete all PowerMap entities + description: >- + Delete all entries in the PowerMaps collection. + operationId: doPowerMapsDeleteAll + responses: + "200": + description: >- + Zero (success) error code - one or more entries deleted. + Message contains count of deleted items. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist - Collection is empty + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + /sysinfo/powermaps/{xname}: + get: + tags: + - PowerMap + summary: Retrieve PowerMap at {xname} + description: >- + Retrieve PowerMap for a component located at physical location {xname}. + operationId: doPowerMapGet + parameters: + - name: xname + in: path + type: string + description: Locational xname of PowerMap record to return. + required: true + responses: + "200": + description: PowerMap entry matching xname/ID + schema: + $ref: '#/definitions/PowerMap.1.0.0_PowerMap' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: Does Not Exist + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + delete: + tags: + - PowerMap + summary: Delete PowerMap with ID {xname} + description: >- + Delete PowerMap entry for a specific component {xname}. + operationId: doPowerMapDelete + parameters: + - name: xname + in: path + type: string + description: Locational xname of PowerMap record to delete. + required: true + responses: + "200": + description: Zero (success) error code - PowerMap is deleted. 
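+        # Illustrative sketch (comments only): a PowerMaps POST body, assuming each
+        # entry pairs an 'id' xname with the required 'poweredBy' xname list
+        # ('poweredBy' is named above; 'id' and the xname values are placeholders).
+        #   POST /hsm/v2/sysinfo/powermaps
+        #   { "PowerMaps": [ { "id": "x0c0s1b0", "poweredBy": [ "x0m0p0j10" ] } ] }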
+ schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + "404": + description: XName does Not Exist - no matching ID to delete + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' + put: + tags: + - PowerMap + summary: Update definition for PowerMap ID {xname} + description: >- + Update or create an entry for an individual component xname using PUT. + If the PUT operation contains an xname that already exists, the entry + will be overwritten with the new entry. + operationId: doPowerMapPut + parameters: + - name: xname + in: path + type: string + description: >- + Locational xname of PowerMap record to create or update. + required: true + - name: payload + in: body + required: true + schema: + $ref: '#/definitions/PowerMap.1.0.0_PowerMap' + responses: + "200": + description: Zero (success) error code - PowerMap was created/updated. + schema: + $ref: '#/definitions/Response_1.0.0' + "400": + description: Bad Request + schema: + $ref: '#/definitions/Problem7807' + default: + description: Unexpected error + schema: + $ref: '#/definitions/Problem7807' +definitions: + ########################################################################## + # + # State Component data structures + # + ########################################################################## + Component.1.0.0_Component: + description: >- + This is the logical representation of a component for which state is + tracked and includes other variables that may be needed by clients. + It is keyed by the physical location i.e. xname. + properties: + ID: + # Uniquely identifies the component by its physical location (xname) + $ref: '#/definitions/XName.1.0.0' + Type: + # HMS Logical component type e.g. Node, Cabinet, etc. + $ref: '#/definitions/HMSType.1.0.0' + State: + # Logical component state e.g. On, Off, Ready. + $ref: '#/definitions/HMSState.1.0.0' + Flag: + # Logical component state flag e.g. OK, Alert. + $ref: '#/definitions/HMSFlag.1.0.0' + Enabled: + description: >- + Whether component is enabled. True when enabled, false when disabled. + type: boolean + SoftwareStatus: + description: >- + SoftwareStatus of a node, used by the managed plane for running + nodes. Will be missing for other component types or if + not set by software. + type: string + Role: + # Component/node role currently assigned if not default. + $ref: '#/definitions/HMSRole.1.0.0' + SubRole: + # Component/node subrole currently assigned if not default. + $ref: '#/definitions/HMSSubRole.1.0.0' + NID: + description: This is the integer Node ID if the component is a node. + type: integer + example: 1 + Subtype: + description: Further distinguishes between components of same type. + type: string + readOnly: true + NetType: + # Type of network for HSN, if applicable and present. + $ref: '#/definitions/NetType.1.0.0' + Arch: + # Architecture type e.g. X86 or ARM. + $ref: '#/definitions/HMSArch.1.0.0' + Class: + # Hardware class e.g. River or Mountain. + $ref: '#/definitions/HMSClass.1.0.0' + ReservationDisabled: + description: >- + Whether component can be reserved via the locking API. + True when reservations are disabled, thus no new + reservations can be created on this component. + type: boolean + example: false + readOnly: true + Locked: + description: >- + Whether a component is locked via the locking API. 
+ type: boolean + example: false + readOnly: true + type: object + Component.1.0.0_ComponentCreate: + description: >- + This is the logical representation of a component for which state is + tracked and includes other variables that may be needed by clients. + It is keyed by the physical location i.e. xname. + properties: + ID: + # Uniquely identifies the component by its physical location (xname) + $ref: '#/definitions/XNameRW.1.0.0' + State: + # Logical component state e.g. On, Off, Ready. + $ref: '#/definitions/HMSState.1.0.0' + Flag: + # Logical component state flag e.g. OK, Alert. + $ref: '#/definitions/HMSFlag.1.0.0' + Enabled: + description: >- + Whether component is enabled. True when enabled, false when disabled. + type: boolean + SoftwareStatus: + description: >- + SoftwareStatus of a node, used by the managed plane for running + nodes. Will be missing for other component types or if + not set by software. + type: string + Role: + # Component/node role currently assigned if not default. + $ref: '#/definitions/HMSRole.1.0.0' + SubRole: + # Component/node subrole currently assigned if not default. + $ref: '#/definitions/HMSSubRole.1.0.0' + NID: + description: This is the integer Node ID if the component is a node. + type: integer + example: 1 + Subtype: + description: Further distinguishes between components of same type. + type: string + NetType: + # Type of network for HSN, if applicable and present. + $ref: '#/definitions/NetType.1.0.0' + Arch: + # Architecture type e.g. X86 or ARM. + $ref: '#/definitions/HMSArch.1.0.0' + Class: + # Hardware class e.g. River or Mountain. + $ref: '#/definitions/HMSClass.1.0.0' + type: object + required: + - ID + - State + Component.1.0.0_Put: + description: >- + This is the payload of a state components URI put operation on a + component. + properties: + Component: + $ref: '#/definitions/Component.1.0.0_ComponentCreate' + Force: + description: >- + If true, 'force' causes this operation to overwrite the 'State', + 'Flag', 'Subtype', 'NetType', and 'Arch' fields for the specified + component if it already exists. Otherwise, nothing will be + overwritten. + type: boolean + type: object + required: + - Component + # + # Component Patch operation payloads. Modify only selected fields + # given the corresponding API that is used to patch. + # + Component.1.0.0_Patch.StateData: + description: >- + This is the payload of a StateData URI patch operation on a component. + Flag is optional and will be reset to OK if no Flag value is given. + properties: + State: + # Logical component state e.g. On, Off, Ready. + $ref: '#/definitions/HMSState.1.0.0' + Flag: + # Logical component state flag e.g. OK, Alert. + $ref: '#/definitions/HMSFlag.1.0.0' + Force: + description: >- + If the state change is normally prohibited, due to the current + and new states, force the change anyway. Default is false. + type: boolean + example: false + ExtendedInfo: + # This is the message that should explain what triggered the change. + # it is optional and may be omitted. + $ref: '#/definitions/Message_1.0.0_ExtendedInfo' + type: object + required: + - State + Component.1.0.0_Patch.FlagOnly: + description: >- + This is the payload of a FlagOnly patch operation on a component. + Flag is required and the State field is unmodified regardless of the + value given. + properties: + Flag: + # Logical component state flag e.g. OK, Alert. + $ref: '#/definitions/HMSFlag.1.0.0' + ExtendedInfo: + # This is the message that should explain what triggered the change.
+ # it is optional and may be omitted. + $ref: '#/definitions/Message_1.0.0_ExtendedInfo' + type: object + required: + - Flag + Component.1.0.0_Patch.Enabled: + description: >- + This is the payload of an Enabled patch operation on a Component. + Enabled is required, and is a boolean field with true representing + enabled and false disabled. + properties: + Enabled: + description: Component Enabled(true)/Disabled(false) flag + type: boolean + ExtendedInfo: + # This is the message that should explain what triggered the change. + # it is optional and may be omitted. + $ref: '#/definitions/Message_1.0.0_ExtendedInfo' + type: object + required: + - Enabled + Component.1.0.0_Patch.SoftwareStatus: + description: >- + This is the payload of a SoftwareStatus patch operation on a Component. + properties: + SoftwareStatus: + description: >- + Component/node software status field, reserved for managed plane. + type: string + ExtendedInfo: + # This is the message that should explain what triggered the change. + # it is optional and may be omitted. + $ref: '#/definitions/Message_1.0.0_ExtendedInfo' + type: object + Component.1.0.0_Patch.Role: + description: >- + This is the payload of a Role patch operation on a Component. + Role is required; however, the operation will fail if Role is not + a supported property of the corresponding HMS type. + properties: + Role: + # Component/node role currently assigned if not default. + $ref: '#/definitions/HMSRole.1.0.0' + SubRole: + # Component/node subrole currently assigned if not default. + $ref: '#/definitions/HMSSubRole.1.0.0' + ExtendedInfo: + # This is the message that should explain what triggered the change. + # it is optional and may be omitted. + $ref: '#/definitions/Message_1.0.0_ExtendedInfo' + type: object + required: + - Role + Component.1.0.0_Patch.NID: + description: >- + This is the payload of a NID patch operation on a Component. + NID is required, but the operation will fail if NID is not + appropriate for the corresponding HMS type (e.g. node). + properties: + NID: + description: This is the integer Node ID if the component is a node. + type: integer + ExtendedInfo: + # This is the message that should explain what triggered the change. + # it is optional and may be omitted. + $ref: '#/definitions/Message_1.0.0_ExtendedInfo' + type: object + required: + - NID + # + # Component Patch payloads - Bulk operations with ComponentArray + # + Component.1.0.0_PatchArrayItem.NID: + description: >- + This is one entry in a NID patch operation on an entire + ComponentArray. ID and NID are required or the operation will fail. + Only the NID field is updated, and then only if it is appropriate + for the corresponding HMS type of the entry (e.g. node). + properties: + ID: + $ref: '#/definitions/XNameForQuery.1.0.0' + Type: + $ref: '#/definitions/HMSType.1.0.0' + NID: + description: This is the integer Node ID if the component is a node. + type: integer + ExtendedInfo: + # This is the message that should explain what triggered the change. + # it is optional and may be omitted. + $ref: '#/definitions/Message_1.0.0_ExtendedInfo' + required: + - ID + - NID + type: object + Component.1.0.0_ResourceURICollection: + properties: + Name: + description: >- + Should describe the collection, though the type of resources + the links correspond to should also be inferred from the context + in which the collection was obtained. + type: string + readOnly: true + example: (Type of Object) Collection + Members: + description: An array of ResourceIds.
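+  # Illustrative sketch (comments only): a body matching
+  # Component.1.0.0_Patch.Role above; 'Compute' is a placeholder role value
+  # (valid values come from HMSRole.1.0.0).
+  #   { "Role": "Compute" }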
+ items: + $ref: '#/definitions/ResourceURI.1.0.0' + type: array + readOnly: true + MemberCount: + description: Number of ResourceURIs in the collection + type: number + format: int32 + readOnly: true + type: object + ComponentByNID.1.0.0_ResourceURICollection: + properties: + Name: + description: >- + Should describe the collection, though the type of resources + the links correspond to should also be inferred from the context + in which the collection was obtained. + type: string + readOnly: true + example: (Type of Object) Collection + Members: + description: An array of ResourceIds. + items: + $ref: '#/definitions/ResourceURI.1.0.0' + type: array + readOnly: true + MemberCount: + description: Number of ResourceURIs in the collection + type: number + format: int32 + readOnly: true + type: object + # + # ComponentArray + # + ComponentArray_ComponentArray: + description: >- + This is a collection of Component objects returned whenever a query + is expected to result in 0 to n matches. + properties: + Components: + description: Contains the HMS component objects in the array. + items: + $ref: '#/definitions/Component.1.0.0_Component' + type: array + type: object + ComponentArray_PostArray: + description: >- + This is a component post request. Contains the new component fields to + apply. + properties: + Components: + description: >- + Contains the HMS component objects in the array. + items: + $ref: '#/definitions/Component.1.0.0_ComponentCreate' + type: array + Force: + description: >- + If true, 'force' causes this operation to overwrite the 'State', + 'Flag', 'Subtype', 'NetType', and 'Arch' fields for the specified + component if it already exists. Otherwise, nothing will be + overwritten. + type: boolean + required: + - Components + type: object + ComponentArray_PatchArray.StateData: + description: >- + This is a component state data patch request. Contains the new state + to apply, new flag to apply (optional), and a list of component xnames + for update. If the component flag is omitted, the flag will be reset + to 'ok'. + properties: + ComponentIDs: + description: >- + An array of XName/ID values for the components to update. + items: + $ref: '#/definitions/XNameForQuery.1.0.0' + type: array + State: + description: >- + The new state to apply. + # Logical component state e.g. On, Off, Ready. + $ref: '#/definitions/HMSState.1.0.0' + Flag: + description: >- + The new flag to apply. If omitted, the component flag is reset to 'ok'. + # Logical component state flag e.g. OK, Alert. + $ref: '#/definitions/HMSFlag.1.0.0' + Force: + description: >- + If the state change is normally prohibited, due to the current + and new states, force the change anyway. Default is false. + type: boolean + example: false + ExtendedInfo: + # This is the message that should explain what triggered the change. + # it is optional and may be omitted. + $ref: '#/definitions/Message_1.0.0_ExtendedInfo' + required: + - ComponentIDs + - State + type: object + ComponentArray_PatchArray.FlagOnly: + description: >- + This is a component flag value patch request. Contains the new flag + to apply and a list of component xnames for update. + properties: + ComponentIDs: + description: >- + An array of XName/ID values for the components to update. + items: + $ref: '#/definitions/XNameForQuery.1.0.0' + type: array + Flag: + description: >- + The new flag to apply. + # Logical component state flag e.g. OK, Alert.
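+  # Illustrative sketch (comments only): a bulk patch body matching
+  # ComponentArray_PatchArray.StateData above; 'Ready' and 'OK' are example
+  # state/flag values and the xname is a placeholder.
+  #   { "ComponentIDs": [ "x0c0s1b0n0" ], "State": "Ready", "Flag": "OK" }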
+ $ref: '#/definitions/HMSFlag.1.0.0' + ExtendedInfo: + # This is the message that should explain what triggered the change. + # it is optional and may be omitted. + $ref: '#/definitions/Message_1.0.0_ExtendedInfo' + required: + - ComponentIDs + - Flag + type: object + ComponentArray_PatchArray.Enabled: + description: >- + This is a component Enabled field patch request. Contains the new value + of enabled to apply and the list of component xnames to update. + properties: + ComponentIDs: + description: >- + An array of XName/ID values for the components to update. + items: + $ref: '#/definitions/XNameForQuery.1.0.0' + type: array + Enabled: + description: >- + Whether component is enabled. True when enabled, false when disabled. + type: boolean + ExtendedInfo: + # This is the message that should explain what triggered the change. + # it is optional and may be omitted. + $ref: '#/definitions/Message_1.0.0_ExtendedInfo' + required: + - ComponentIDs + - Enabled + type: object + ComponentArray_PatchArray.SoftwareStatus: + description: >- + This is a component SoftwareStatus field patch request. Contains a new, + single value of SoftwareStatus to apply, and the list of component xnames + to update. + properties: + ComponentIDs: + description: >- + An array of XName/ID values for the components to update. + items: + $ref: '#/definitions/XNameForQuery.1.0.0' + type: array + SoftwareStatus: + description: >- + SoftwareStatus of the node, used by the managed plane for running + nodes. + type: string + ExtendedInfo: + # This is the message that should explain what triggered the change. + # it is optional and may be omitted. + $ref: '#/definitions/Message_1.0.0_ExtendedInfo' + required: + - ComponentIDs + - SoftwareStatus + type: object + ComponentArray_PatchArray.Role: + description: >- + This is a component Role value patch request. Contains the new Role + to apply and a list of component xnames for update. + properties: + ComponentIDs: + description: >- + An array of XName/ID values for the components to update. + items: + $ref: '#/definitions/XNameForQuery.1.0.0' + type: array + Role: + description: >- + The new Role to apply. + $ref: '#/definitions/HMSRole.1.0.0' + SubRole: + description: >- + The new SubRole to apply. + $ref: '#/definitions/HMSSubRole.1.0.0' + ExtendedInfo: + # This is the message that should explain what triggered the change. + # it is optional and may be omitted. + $ref: '#/definitions/Message_1.0.0_ExtendedInfo' + required: + - ComponentIDs + - Role + type: object + ComponentArray_PatchArray.NID: + description: >- + This is a collection of Component objects with just the ID and + NID fields populated. + properties: + Name: + description: Descriptive name e.g. why it was generated. + readOnly: true + type: string + Components: + description: >- + Contains the component objects in the array but with just the + Component ID and the patchable fields for a NID patch + operation filled in. Other Component fields are not updated + during these operations. + items: + $ref: '#/definitions/Component.1.0.0_PatchArrayItem.NID' + type: array + type: object + # + # Query POSTs to supply arbitrary list of components. + # + ComponentArray_PostQuery: + description: >- + There are limits to the length of an HTTP URL and query string. + Hence, if we wish to query an arbitrary list of XName/IDs, it + will need to be in the body of the request. This object is + used for this purpose. It is similar to the analogous GET operation.
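+  # Illustrative sketch (comments only): a query body matching
+  # ComponentArray_PostQuery as defined below, POSTed to the components query
+  # endpoint declared earlier in this spec; all values are placeholders.
+  #   { "ComponentIDs": [ "x0c0s1b0n0" ], "partition": "p1", "stateonly": true }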
+ properties: + ComponentIDs: + description: >- + An array of XName/ID values for the components to query. + items: + $ref: '#/definitions/XNameForQuery.1.0.0' + type: array + partition: + description: >- + Partition name to filter on, as per current /partitions/names + type: string + example: p1 + group: + description: >- + Group label to filter on, as per current /groups/labels + type: string + example: group_label + stateonly: + description: >- + Return only component state and flag fields (plus xname/ID and + type). Results can be modified and used for bulk state/flag-only + patch operations. + type: boolean + flagonly: + description: >- + Return only component flag field (plus xname/ID and type). + Results can be modified and used for bulk flag-only patch + operations. + type: boolean + roleonly: + description: >- + Return only component role and subrole fields (plus xname/ID and type). + Results can be modified and used for bulk role-only patches. + type: boolean + nidonly: + description: >- + Return only component NID field (plus xname/ID and type). + Results can be modified and used for bulk NID-only patches. + type: boolean + type: + description: >- + Retrieve all components with the given HMS type. + items: + type: string + type: array + state: + description: >- + Retrieve all components with the given HMS state. + items: + type: string + type: array + flag: + description: >- + Retrieve all components with the given HMS flag value. + items: + type: string + type: array + enabled: + description: >- + Retrieve all components with the given enabled status (true or false). + items: + type: string + type: array + softwarestatus: + description: >- + Retrieve all components with the given software status. + Software status is a free form string. Matching is case-insensitive. + items: + type: string + type: array + role: + description: >- + Retrieve all components (i.e. nodes) with the given HMS role + items: + type: string + type: array + subrole: + description: >- + Retrieve all components (i.e. nodes) with the given HMS subrole + items: + type: string + type: array + subtype: + description: >- + Retrieve all components with the given HMS subtype. + items: + type: string + type: array + arch: + description: >- + Retrieve all components with the given architecture. + items: + type: string + type: array + class: + description: >- + Retrieve all components (i.e. nodes) with the given HMS hardware class. Class can be + River, Mountain, etc. + items: + type: string + type: array + nid: + description: >- + Retrieve all components (i.e. one node) with the given integer NID + items: + type: string + type: array + nid_start: + description: >- + Retrieve all components (i.e. nodes) with NIDs equal to or greater + than the provided integer. + items: + type: string + type: array + nid_end: + description: >- + Retrieve all components (i.e. nodes) with NIDs less than or equal + to the provided integer. + items: + type: string + type: array + type: object + ComponentArray_PostByNIDQuery: + description: >- + There are limits to the length of an HTTP URL and query string. + Hence, if we wish to query an arbitrary list of NIDs, it + will need to be in the body of the request. This object is + used for this purpose. Parameters are similar to the analogous GET + operation. + properties: + # NIDs: + # description: >- + # NID values to query, producing a ComponentArray with the matching + # components.
+ # items: + # type: integer + # format: int32 + # type: array + NIDRanges: + description: >- + NID range values to query, producing a ComponentArray with the + matching components, e.g. "0-24" or "2". Add multiple ranges + as separate array items. + items: + $ref: '#/definitions/NIDRange.1.0.0' + type: array + partition: + $ref: '#/definitions/XNamePartition.1.0.0' + stateonly: + description: >- + Return only component state and flag fields (plus xname/ID and + type). Results can be modified and used for bulk state/flag-only + patch operations. + type: boolean + flagonly: + description: >- + Return only component flag field (plus xname/ID and type). + Results can be modified and used for bulk flag-only patch + operations. + type: boolean + roleonly: + description: >- + Return only component role and subrole fields (plus xname/ID and type). + Results can be modified and used for bulk role-only patches. + type: boolean + nidonly: + description: >- + Return only component NID field (plus xname/ID and type). + Results can be modified and used for bulk NID-only patches. + type: boolean + required: + - NIDRanges + type: object + ########################################################################## + # + # Node Map structures - Default xname->NID/Role/etc. mappings. + # + ########################################################################## + NodeMap.1.0.0_NodeMap: + description: >- + NodeMaps are a way of pre-populating state manager with a set of + valid node xnames (currently populated, or just potentially populated) + and assigning each a default NID (and optionally also a Role and SubRole). + NID is required and must be unique within the NodeMaps. + + When components are first discovered, if a matching NodeMap entry is + found, that NID will be used to create the component entry. This + allows NIDs to be defined in advance in an orderly way that allows + NID ranges to be consecutive on the set of xnames that is actually + used for a particular hardware config. The default NIDs used + if no NodeMap is present are based on enumerating NIDs for ALL + POSSIBLE xnames, even though in practice only a small subset will + be used for any particular hardware config (resulting in very sparse + assignments). NodeMaps, then, help avoid this. + + Updating NodeMaps for already discovered components (unless they are + deleted and then rediscovered) will not automatically update the NID + field in States/Components. Likewise using a patch to update NID + on a particular entry in States/Components will not automatically + define or update a NodeMap entry. + properties: + ID: + # Uniquely identifies a node by its physical location (xname) + $ref: '#/definitions/XName.1.0.0' + NID: + description: Positive default Node ID (NID) for the xname in ID + type: integer + example: 1 + Role: + # Component/node role currently assigned if not default. + $ref: '#/definitions/HMSRole.1.0.0' + SubRole: + # Component/node subrole currently assigned if not default. + $ref: '#/definitions/HMSSubRole.1.0.0' + required: + - NID + type: object + NodeMap.1.0.0_PostNodeMap: + description: >- + NodeMaps are a way of pre-populating state manager with a set of + valid node xnames (currently populated, or just potentially populated) + and assigning each a default NID (and optionally also a Role and SubRole). + NID is required and must be unique within the NodeMaps. + + When components are first discovered, if a matching NodeMap entry is + found, that NID will be used to create the component entry.
+      allows NIDs to be defined in advance in an orderly way that allows
+      NID ranges to be consecutive on the set of xnames that is actually
+      used for a particular hardware config.  The default NIDs used
+      if no NodeMap is present are based on enumerating NIDs for ALL
+      POSSIBLE xnames, even though in practice only a small subset will
+      be used for any particular hardware config (resulting in very sparse
+      assignments).  NodeMaps, then, help avoid this.
+
+      Updating NodeMaps for already discovered components (unless they are
+      deleted and then rediscovered) will not automatically update the NID
+      field in States/Components.  Likewise, using a patch to update NID
+      on a particular entry in States/Components will not automatically
+      define or update a NodeMap entry.
+    properties:
+      ID:
+        # Uniquely identifies a node by its physical location (xname)
+        $ref: '#/definitions/XNameForQuery.1.0.0'
+      NID:
+        description: Positive default Node ID (NID) for the xname in ID
+        type: integer
+        example: 1
+      Role:
+        # Component/node role currently assigned if not default.
+        $ref: '#/definitions/HMSRole.1.0.0'
+      SubRole:
+        # Component/node subrole currently assigned if not default.
+        $ref: '#/definitions/HMSSubRole.1.0.0'
+    required:
+      - ID
+      - NID
+    type: object
+  NodeMapArray_NodeMapArray:
+    description: >-
+      This is a named array of NodeMap objects.  This is the result of
+      GET-ing the NodeMaps collection, or can be used to populate or
+      update it as input provided via POST.
+    properties:
+      NodeMaps:
+        description: Contains the NodeMap objects in the array.
+        items:
+          $ref: '#/definitions/NodeMap.1.0.0_PostNodeMap'
+        type: array
+    type: object
+  #########################################################################
+  #
+  # Redfish ComponentEndpoint data - Represents Redfish discovered data for
+  #                                  components running under a particular
+  #                                  Redfish endpoint, needed for services
+  #                                  that interact with these via Redfish.
+  #
+  #########################################################################
+  ComponentEndpoint.1.0.0_ComponentEndpoint:
+    description: >-
+      This describes a child component of a Redfish endpoint and is populated
+      when Redfish endpoint discovery occurs.  It is used by services that
+      need to interact directly with the component via Redfish.
+      It represents a physical component of something and has a corresponding
+      representation as an HMS Component, hence the name.
+      There are also ServiceEndpoints which represent Redfish services that
+      are discovered when the RedfishEndpoint is discovered.
+
+      NOTE: These records are discovered, not created, and therefore are not
+      writable (since any changes would be overwritten by a subsequent
+      discovery).
+
+      Additional info is appended depending on RedfishType (discriminator)
+    properties:
+      ID:
+        # Uniquely identifies the component endpoint by its location i.e. xname.
+        $ref: '#/definitions/XName.1.0.0'
+      Type:
+        # HMS Logical component type e.g. Node, Cabinet, etc.
+        $ref: '#/definitions/HMSType.1.0.0'
+      Domain:
+        description: Domain of component FQDN.  Hostname is always ID/xname
+        type: string
+        example: mgmt.example.domain.com
+      FQDN:
+        description: >-
+          Fully-qualified domain name of component on management network if
+          for example the component is a node.
+        type: string
+        example: x0c0s0b0n0.mgmt.example.domain.com
+      RedfishType:
+        # This is the Redfish object type, not to be confused with the HMS
+        # component type.  In this case, either a Manager, Chassis or
+        # ComputerSystem.
+        $ref: '#/definitions/RedfishType.1.0.0'
+      RedfishSubtype:
+        # This is the type corresponding to the Redfish object type, i.e. the
+        # ChassisType field, SystemType, ManagerType fields.
+        $ref: '#/definitions/RedfishSubtype.1.0.0'
+      Enabled:
+        description: >-
+          To disable a component without deleting its data from the database,
+          this can be set to false.
+        type: boolean
+        example: true
+      ComponentEndpointType:
+        description: >-
+          This is used as a discriminator to determine the additional RF-type-
+          specific data that is kept for a ComponentEndpoint.
+        enum:
+          - ComponentEndpointChassis
+          - ComponentEndpointComputerSystem
+          - ComponentEndpointManager
+          - ComponentEndpointPowerDistribution
+          - ComponentEndpointOutlet
+        type: string
+        example: ComponentEndpointComputerSystem
+      MACAddr:
+        description: >-
+          If the component e.g. a ComputerSystem/Node has a MAC on the
+          management network, i.e. corresponding to the FQDN field's
+          Ethernet interface, this field will be present.  Not the HSN
+          MAC.  Represented as the standard colon-separated 6 byte hex string.
+        pattern: '^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$'
+        type: string
+        example: ae:12:ce:7a:aa:99
+      UUID:
+        $ref: '#/definitions/UUID.1.0.0'
+      OdataID:
+        # This is the relative path to the component relative to the parent
+        # RedfishEndpoint's service root.
+        $ref: '#/definitions/OdataID.1.0.0'
+      RedfishEndpointID:
+        # Unique identifier for the parent RedfishEndpoint by its location
+        # i.e. xname.  This is essentially a back-reference to the RF endpoint
+        # that was used to discover this component.
+        $ref: '#/definitions/XNameRFEndpoint.1.0.0'
+      RedfishEndpointFQDN:
+        description: >-
+          This is a back-reference to the fully-qualified domain name of the
+          parent Redfish endpoint that was used to discover the component.  It
+          is the RedfishEndpointID field i.e. the hostname/xname plus its
+          current domain.
+        type: string
+        readOnly: true
+        example: x0c0s0b0.mgmt.example.domain.com
+      RedfishURL:
+        description: >-
+          Complete URL to the corresponding Redfish object,
+          combining the RedfishEndpoint's FQDN and the OdataID.
+        type: string
+        readOnly: true
+        example:
+          x0c0s0b0.mgmt.example.domain.com/redfish/v1/Systems/System.Embedded.1
+    type: object
+    discriminator: ComponentEndpointType
+    required:
+      - ComponentEndpointType
+  ComponentEndpointChassis:
+    description: >-
+      This is a subtype of ComponentEndpoint for Chassis RF components,
+      i.e. of most HMS components other than nodes and BMCs.
+      This subtype is used when the ComponentEndpoint's ComponentEndpointType
+      is 'ComponentEndpointChassis' via the 'discriminator:
+      ComponentEndpointType' property.
+    allOf:
+      - $ref: '#/definitions/ComponentEndpoint.1.0.0_ComponentEndpoint'
+      - type: object
+        properties:
+          RedfishChassisInfo:
+            $ref: '#/definitions/ComponentEndpoint.1.0.0_RedfishChassisInfo'
+        type: object
+  ComponentEndpointComputerSystem:
+    description: >-
+      This is a subtype of ComponentEndpoint for ComputerSystem RF components,
+      i.e. a node HMS type.
+      This subtype is used when the ComponentEndpoint's ComponentEndpointType
+      is 'ComponentEndpointComputerSystem' via the 'discriminator:
+      ComponentEndpointType' property.
+    allOf:
+      - $ref: '#/definitions/ComponentEndpoint.1.0.0_ComponentEndpoint'
+      - type: object
+        properties:
+          RedfishSystemInfo:
+            $ref: '#/definitions/ComponentEndpoint.1.0.0_RedfishSystemInfo'
+        type: object
+  ComponentEndpointManager:
+    description: >-
+      This is a subtype of ComponentEndpoint for Manager RF components,
+      i.e. any BMC type.
For example NodeBMC is a Manager, NodeEnclosure
+      is a Chassis RF type.
+      This subtype is used when the ComponentEndpoint's ComponentEndpointType
+      is 'ComponentEndpointManager' via the 'discriminator:
+      ComponentEndpointType' property.
+    allOf:
+      - $ref: '#/definitions/ComponentEndpoint.1.0.0_ComponentEndpoint'
+      - type: object
+        properties:
+          RedfishManagerInfo:
+            $ref: '#/definitions/ComponentEndpoint.1.0.0_RedfishManagerInfo'
+        type: object
+  ComponentEndpointPowerDistribution:
+    description: >-
+      This is a subtype of ComponentEndpoint for PowerDistribution RF
+      components.  This subtype is used when the ComponentEndpoint's
+      ComponentEndpointType is 'ComponentEndpointPowerDistribution' via
+      the 'discriminator: ComponentEndpointType' property.
+    allOf:
+      - $ref: '#/definitions/ComponentEndpoint.1.0.0_ComponentEndpoint'
+      - type: object
+        properties:
+          RedfishChassisInfo:
+            $ref:
+              '#/definitions/ComponentEndpoint.1.0.0_RedfishPowerDistributionInfo'
+        type: object
+  ComponentEndpointOutlet:
+    description: >-
+      This is a subtype of ComponentEndpoint for PowerDistribution Outlet RF
+      components.  This subtype is used when the ComponentEndpoint's
+      ComponentEndpointType is 'ComponentEndpointOutlet' via
+      the 'discriminator: ComponentEndpointType' property.
+    allOf:
+      - $ref: '#/definitions/ComponentEndpoint.1.0.0_ComponentEndpoint'
+      - type: object
+        properties:
+          RedfishChassisInfo:
+            $ref: '#/definitions/ComponentEndpoint.1.0.0_RedfishOutletInfo'
+        type: object
+  ComponentEndpoint.1.0.0_RedfishChassisInfo:
+    description: >-
+      This is the ChassisInfo field in the RF Chassis subtype of
+      ComponentEndpoint, i.e. when the latter's RedfishType is Chassis.
+      This is where new fields will be added.
+    properties:
+      Name:
+        description: The Redfish 'Name' of the Chassis.
+        type: string
+        readOnly: true
+      Actions:
+        $ref: '#/definitions/Actions_1.0.0_ChassisActions'
+    type: object
+  ComponentEndpoint.1.0.0_RedfishSystemInfo:
+    description: >-
+      This is the SystemInfo object in the RF ComputerSystem subtype of
+      ComponentEndpoint, i.e. when the latter's RedfishType is ComputerSystem.
+      It contains HMS-Node/ComputerSystem-specific Redfish fields that need
+      to be collected during discovery and made available to clients.
+      This is where new fields will be added.  Mostly placeholder now.
+    properties:
+      Name:
+        description: The Redfish 'Name' of the ComputerSystem.
+        type: string
+        readOnly: true
+      Actions:
+        $ref: '#/definitions/Actions_1.0.0_ComputerSystemActions'
+      EthernetNICInfo:
+        items:
+          $ref: '#/definitions/EthernetNICInfo_1.0.0'
+        type: array
+      PowerURL:
+        description: The URL for the power info for this node.
+        type: string
+        readOnly: true
+        example: /redfish/v1/Chassis/Node0/Power
+      PowerControl:
+        items:
+          $ref: '#/definitions/PowerControl_1.0.0'
+        type: array
+    type: object
+  ComponentEndpoint.1.0.0_RedfishManagerInfo:
+    description: >-
+      This is the ManagerInfo object in the RF Manager subtype of
+      ComponentEndpoint, i.e. when the latter's RedfishType is Manager.
+      It contains BMC/Manager-specific Redfish fields that need
+      to be collected during discovery and made available to clients.
+      This is where new fields will be added.  Mostly placeholder now.
+    properties:
+      Name:
+        description: The Redfish 'Name' of the Manager.
+        type: string
+        readOnly: true
+      Actions:
+        $ref: '#/definitions/Actions_1.0.0_ManagerActions'
+      EthernetNICInfo:
+        items:
+          $ref: '#/definitions/EthernetNICInfo_1.0.0'
+        type: array
+    type: object
+  ComponentEndpoint.1.0.0_RedfishPowerDistributionInfo:
+    description: >-
+      This is the RedfishPDUInfo field in the RF Chassis subtype of
+      ComponentEndpoint, i.e. when the latter's RedfishType is
+      PowerDistribution.  This is where new fields will be added.
+    properties:
+      Name:
+        description: The Redfish Name of the PDU.
+        type: string
+        readOnly: true
+      # Actions:
+      #   $ref: '#/definitions/Actions_1.0.0_PDUActions'
+    type: object
+  ComponentEndpoint.1.0.0_RedfishOutletInfo:
+    description: >-
+      This is the RedfishOutletInfo field in the RF Outlet subtype of
+      ComponentEndpoint, i.e. when the latter's RedfishType is Outlet.
+      This is where new fields will be added.
+    properties:
+      Name:
+        description: The Redfish Name of the Outlet.
+        type: string
+        readOnly: true
+      Actions:
+        $ref: '#/definitions/Actions_1.0.0_OutletActions'
+    type: object
+  ComponentEndpoint.1.0.0_ResourceURICollection:
+    properties:
+      Name:
+        description: >-
+          Should describe the collection, though the type of resources
+          the links correspond to should also be inferred from the context
+          in which the collection was obtained.
+        type: string
+        readOnly: true
+        example: (Type of Object) Collection
+      Members:
+        description: An array of ResourceIds.
+        items:
+          $ref: '#/definitions/ResourceURI.1.0.0'
+        type: array
+        readOnly: true
+      MemberCount:
+        description: Number of ResourceURIs in the collection
+        type: number
+        format: int32
+        readOnly: true
+    type: object
+  ComponentEndpointArray_ComponentEndpointArray:
+    description: >-
+      This is a collection of ComponentEndpoint objects returned whenever a
+      query is expected to result in 0 to n matches.
+    properties:
+      ComponentEndpoints:
+        description: Contains the HMS ComponentEndpoint objects in the array.
+        items:
+          $ref: '#/definitions/ComponentEndpoint.1.0.0_ComponentEndpoint'
+        type: array
+    type: object
+  #
+  # ComponentEndpoint POST query bodies
+  #
+  ComponentEndpointArray_PostQuery:
+    description: >-
+      There are limits to the length of an HTTP URL and query string.
+      Hence, if we wish to query an arbitrary list of XName/IDs, it
+      will need to be in the body of the request.  This object is
+      used for this purpose.  It is similar to the analogous GET operation.
+    properties:
+      ComponentEndpointIDs:
+        description: >-
+          An array of XName/ID values for the ComponentEndpoints to query.
+        items:
+          $ref: '#/definitions/XNameForQuery.1.0.0'
+        type: array
+      partition:
+        $ref: '#/definitions/XNamePartition.1.0.0'
+    required:
+      - ComponentEndpointIDs
+    type: object
+  ###########################################################################
+  #
+  # HSN Info - HSN NIC Addresses and Coordinates by xname
+  #
+  ###########################################################################
+  HSNInfo.1.0.0:
+    description: >-
+      Component to NIC and Network Coordinate Map
+    properties:
+      HSNTopology:
+        $ref: '#/definitions/HSNTopology.1.0.0'
+      HSNNetworkType:
+        $ref: '#/definitions/NetType.1.0.0'
+      HSNInfoEntries:
+        description: Contains an HSN info entry for each component.
+        items:
+          $ref: '#/definitions/HSNInfoEntry.1.0.0'
+        type: array
+        readOnly: true
+    type: object
+  HSNInfoEntry.1.0.0:
+    description: The HSN info for an individual component, e.g. node.
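+    # Editorial note (illustrative, not part of the schema): a single HSN
+    # info entry might serialize as
+    #   { "ID": "x0c0s0b0n0", "Type": "Node",
+    #     "NICAddrs": [ ... ], "HSNCoords": [0, 0, 0, 0, 0] }
+    # with the NICAddrs content elided here; see NICAddrs.1.0.0 for the
+    # address format.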
+ properties: + ID: + # The xname of the component location + $ref: '#/definitions/XName.1.0.0' + Type: + # The HMS Type of the location/xname + $ref: '#/definitions/HMSType.1.0.0' + NICAddrs: + # A collection of HSN NIC addresses in string form. + $ref: '#/definitions/NICAddrs.1.0.0' + HSNCoords: + description: + HSN Coordinates of the components, an integer tuple of a particular + length in array form. + items: + type: integer + format: int32 + type: array + example: [ 0, 0, 0, 0, 0 ] + readOnly: true + type: object + HSNTopology.1.0.0: + description: Numerical identifier for HSN topology class + type: integer + format: int32 + example: 0 + ########################################################################### + # + # Hardware Inventory + # + ########################################################################### + HWInventory.1.0.0_HWInventory: + description: >- + This is a collection of hardware inventory data. Depending on + the query only some of these arrays will be populated. + + Also, depending on the query that produced the inventory, some + components may have their subcomponents nested underneath them + (hierarchical query), rather than all arranged in their own arrays by + their types (flat query). + + The default is hierarchical for node subcomponents (Processors, Memory) + and flat for everything else, but other query types are possible and can + use this same basic structure. + + Either way, the 'Target' field is the parent component, partition or + system that is used to select the components for the query. + properties: + XName: + # Partition, component or s0 that is used to select the components + # in the inventory. + $ref: '#/definitions/XName.1.0.0' + Format: + description: >- + How results are displayed + FullyFlat All component types listed in their own + arrays only. No nesting of any children + Hierarchical All subcomponents listed as children up to + top level component (or set of cabinets) + NestNodesOnly Flat except that node subcomponents are nested + hierarchically. + Default is NestNodesOnly. + enum: + - FullyFlat + - Hierarchical + - NestNodesOnly + type: string + example: NestNodesOnly + Cabinets: + description: >- + All components with HMS type 'Cabinet' appropriate given Target + component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocCabinet' + readOnly: true + type: array + Chassis: + description: >- + All appropriate components with HMS type 'Chassis' given Target + component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocChassis' + readOnly: true + type: array + ComputeModules: + description: >- + All appropriate components with HMS type 'ComputeModule' given + Target component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocComputeModule' + readOnly: true + type: array + RouterModules: + description: >- + All appropriate components with HMS type 'RouterModule' given + Target component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocRouterModule' + readOnly: true + type: array + NodeEnclosures: + description: >- + All appropriate components with HMS type 'NodeEnclosure' given + Target component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocNodeEnclosure' + readOnly: true + type: array + HSNBoards: + description: >- + All appropriate components with HMS type 'HSNBoard' given + Target component/partition and query type. 
+ items: + $ref: '#/definitions/HWInvByLocHSNBoard' + readOnly: true + type: array + MgmtSwitches: + description: >- + All appropriate components with HMS type 'MgmtSwitch' given + Target component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocMgmtSwitch' + readOnly: true + type: array + MgmtHLSwitches: + description: >- + All appropriate components with HMS type 'MgmtHLSwitch' given + Target component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocMgmtHLSwitch' + readOnly: true + type: array + CDUMgmtSwitches: + description: >- + All appropriate components with HMS type 'CDUMgmtSwitch' given + Target component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocCDUMgmtSwitch' + readOnly: true + type: array + Nodes: + description: >- + All appropriate components with HMS type 'Node' given Target + component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocNode' + readOnly: true + type: array + Processors: + description: >- + All appropriate components with HMS type 'Processor' given + Target component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocProcessor' + readOnly: true + type: array + NodeAccels: + description: >- + All appropriate components with HMS type 'NodeAccel' given + Target component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocNodeAccel' + readOnly: true + type: array + Drives: + description: >- + All appropriate components with HMS type 'Drive' given + Target component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocDrive' + readOnly: true + type: array + Memory: + description: >- + All appropriate components with HMS type 'Memory' given Target + component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocMemory' + readOnly: true + type: array + CabinetPDUs: + description: >- + All appropriate components with HMS type 'CabinetPDU' given + Target component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocPDU' + readOnly: true + type: array + CabinetPDUPowerConnectors: + description: >- + All appropriate components with HMS type 'CabinetPDUPowerConnector' given + Target component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocOutlet' + readOnly: true + type: array + CMMRectifiers: + description: >- + All appropriate components with HMS type 'CMMRectifier' given + Target component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocCMMRectifier' + readOnly: true + type: array + NodeAccelRisers: + description: >- + All appropriate components with HMS type 'NodeAccelRiser' given + Target component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocNodeAccelRiser' + readOnly: true + type: array + NodeHsnNICs: + description: >- + All appropriate components with HMS type 'NodeHsnNic' given + Target component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocHSNNIC' + readOnly: true + type: array + NodeEnclosurePowerSupplies: + description: >- + All appropriate components with HMS type 'NodeEnclosurePowerSupply' given + Target component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocNodeEnclosurePowerSupply' + readOnly: true + type: array + NodeBMC: + description: >- + All appropriate components with HMS type 'NodeBMC' given + Target component/partition and query type. 
+ items: + $ref: '#/definitions/HWInvByLocNodeBMC' + readOnly: true + type: array + RouterBMC: + description: >- + All appropriate components with HMS type 'RouterBMC' given + Target component/partition and query type. + items: + $ref: '#/definitions/HWInvByLocRouterBMC' + readOnly: true + type: array + type: object + # + # + # Hardware Inventory by location + # + # + HWInventory.1.0.0_HWInventoryByLocation: + description: >- + This is the basic entry in the hardware inventory for a particular + location/xname. If the location is populated (e.g. if a slot for a + blade exists and the blade is present), then there will also be a + link to the FRU entry for the physical piece of hardware that occupies + it. + properties: + ID: + # The xname of the component location + $ref: '#/definitions/XNameCompOrPartition.1.0.0' + Type: + # The HMS Type of the location/xname + $ref: '#/definitions/HMSType.1.0.0' + Ordinal: + description: >- + This is the normalized (from zero) index of the component location + (e.g. slot number) when there are more than one. This should + match the last number in the xname in most cases (e.g. Ordinal 0 + for node x0c0s0b0n0). Note that Redfish may use a different value + or naming scheme, but this is passed through via the *LocationInfo + for the type of component. + type: integer + format: int32 + readOnly: true + Status: + description: Populated or Empty - whether location is populated. + enum: + - Populated + - Empty + type: string + readOnly: true + HWInventoryByLocationType: + description: >- + This is used as a discriminator to determine the additional HMS-type + specific subtype that is returned. + enum: + - HWInvByLocCabinet + - HWInvByLocChassis + - HWInvByLocComputeModule + - HWInvByLocRouterModule + - HWInvByLocNodeEnclosure + - HWInvByLocHSNBoard + - HWInvByLocMgmtSwitch + - HWInvByLocMgmtHLSwitch + - HWInvByLocCDUMgmtSwitch + - HWInvByLocNode + - HWInvByLocProcessor + - HWInvByLocNodeAccel + - HWInvByLocNodeAccelRiser + - HWInvByLocDrive + - HWInvByLocMemory + - HWInvByLocPDU + - HWInvByLocOutlet + - HWInvByLocCMMRectifier + - HWInvByLocNodeEnclosurePowerSupply + - HWInvByLocNodeBMC + - HWInvByLocRouterBMC + - HWInvByLocHSNNIC + type: string + PopulatedFRU: + # If Status is 'Populated' then this will embed the FRU object. + $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + type: object + discriminator: HWInventoryByLocationType + required: + - HWInventoryByLocationType + - ID + # See example for appropriate subclass per HWInventoryByLocationType + # field. + example: + HWInvByLocCabinet: + description: >- + This is a subtype of HWInventoryByLocation for HMSType Cabinet. + It is selected via the 'discriminator: HWInventoryByLocationType' + of HWInventoryByLocation when HWInventoryByLocationType is + 'HWInvByLocCabinet'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + CabinetLocationInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisLocationInfo' + Chassis: + description: >- + Embedded chassis HWInv object array representing subcomponents + (if query is hierarchical). 
+ items: + $ref: '#/definitions/HWInvByLocChassis' + readOnly: true + type: array + type: object + example: + ID: x0 + Type: Cabinet + Ordinal: 0 + Status: Populated + HWInventoryByLocationType: HWInvByLocCabinet + CabinetLocationInfo: + Id: Cabinet + Name: Name describing cabinet or where it is located, per manufacturing + Description: Description of cabinet, per manufacturing + Hostname: if_defined_in_Redfish + PopulatedFRU: + FRUID: Cray-2345-1234556789 + Type: Cabinet + Subtype: MountainCabinet (example) + HWInventoryByFRUType: HWInvByFRUCabinet + CabinetFRUInfo: + AssetTag: AdminAssignedAssetTag + Model: 123 + Manufacturer: Cray + PartNumber: p2345 + SerialNumber: sn1234556789 + SKU: as213234 + ChassisType: Rack + HWInvByLocChassis: + description: >- + This is a subtype of HWInventoryByLocation for HMSType Chassis. + It is selected via the 'discriminator: HWInventoryByLocationType' + of HWInventoryByLocation when HWInventoryByLocationType is + 'HWInvByLocChassis'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + ChassisLocationInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisLocationInfo' + ComputeModules: + description: >- + Embedded ComputeModule HWInv object array representing + subcomponents of that type (if query is hierarchical). + items: + $ref: '#/definitions/HWInvByLocComputeModule' + readOnly: true + type: array + RouterModules: + description: >- + Embedded RouterModule HWInv object array representing + subcomponents of that type (if query is hierarchical). + items: + $ref: '#/definitions/HWInvByLocRouterModule' + readOnly: true + type: array + type: object + example: + ID: x0c0 + Type: Chassis + Ordinal: 0 + Status: Populated + HWInventoryByLocationType: HWInvByLocChassis + ChassisLocationInfo: + Id: Chassis.1 + Name: Name describing component or its location, per manufacturing + Description: Description, per manufacturing + Hostname: if_defined_in_Redfish + PopulatedFRU: + FRUID: Cray-ch01-23452345 + Type: Chassis + Subtype: MountainChassis (example) + HWInventoryByFRUType: HWInvByFRUChassis + ChassisFRUInfo: + AssetTag: AdminAssignedAssetTag + Model: 3245 + Manufacturer: Cray + PartNumber: ch01 + SerialNumber: sn23452345 + SKU: as213234 + ChassisType: Enclosure + HWInvByLocComputeModule: + description: >- + This is a subtype of HWInventoryByLocation for HMSType ComputeModule. + It is selected via the 'discriminator: HWInventoryByLocationType' + of HWInventoryByLocation when HWInventoryByLocationType is + 'HWInvByLocComputeModule'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + ComputeModuleLocationInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisLocationInfo' + NodeEnclosures: + description: >- + Embedded NodeEnclosure HWInv object array representing + subcomponents of that type (if query is hierarchical). + items: + $ref: '#/definitions/HWInvByLocNodeEnclosure' + readOnly: true + type: array + type: object + HWInvByLocRouterModule: + description: >- + This is a subtype of HWInventoryByLocation for HMSType RouterModule. + This is a Mountain switch module. + It is selected via the 'discriminator: HWInventoryByLocationType' + of HWInventoryByLocation when HWInventoryByLocationType is + 'HWInvByLocRouterModule'. 
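+    # Editorial note (illustrative, not part of the schema): as with the
+    # other subtypes, consumers can key off HWInventoryByLocationType to
+    # pick the right schema, e.g.
+    #   { "ID": "x0c0r0", "Type": "RouterModule",
+    #     "HWInventoryByLocationType": "HWInvByLocRouterModule",
+    #     "RouterModuleLocationInfo": { ... }, "HSNBoards": [ ... ] }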
+ allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + RouterModuleLocationInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisLocationInfo' + HSNBoards: + description: >- + Embedded HSNBoard HWInv object array representing + subcomponents of that type (if query is hierarchical). + items: + $ref: '#/definitions/HWInvByLocHSNBoard' + readOnly: true + type: array + type: object + HWInvByLocNodeEnclosure: + description: >- + This is a subtype of HWInventoryByLocation for HMSType NodeEnclosure. + It represents a Mountain node card or River rack enclosure. It is + NOT the BMC, which is separate and corresponds to a Redfish Manager. + It is selected via the 'discriminator: HWInventoryByLocationType' + of HWInventoryByLocation when HWInventoryByLocationType is + 'HWInvByLocNodeEnclosure'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + NodeEnclosureLocationInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisLocationInfo' + HWInvByLocHSNBoard: + description: >- + This is a subtype of HWInventoryByLocation for HMSType HSNBoard. + It represents a Mountain switch card or River TOR enclosure. It is + NOT the BMC, which is separate and corresponds to a Redfish Manager. + It is selected via the 'discriminator: HWInventoryByLocationType' + of HWInventoryByLocation when HWInventoryByLocationType is + 'HWInvByLocHSNBoard'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + HSNBoardLocationInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisLocationInfo' + type: object + HWInvByLocMgmtSwitch: + description: >- + This is a subtype of HWInventoryByLocation for HMSType MgmtSwitch. + It represents a management switch. It is selected via the + 'discriminator: HWInventoryByLocationType' of HWInventoryByLocation + when HWInventoryByLocationType is 'HWInvByLocMgmtSwitch'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + MgmtSwitchLocationInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisLocationInfo' + type: object + HWInvByLocMgmtHLSwitch: + description: >- + This is a subtype of HWInventoryByLocation for HMSType MgmtHLSwitch. + It represents a high level management switch. It is selected via the + 'discriminator: HWInventoryByLocationType' of HWInventoryByLocation + when HWInventoryByLocationType is 'HWInvByLocMgmtHLSwitch'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + MgmtHLSwitchLocationInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisLocationInfo' + type: object + HWInvByLocCDUMgmtSwitch: + description: >- + This is a subtype of HWInventoryByLocation for HMSType CDUMgmtSwitch. + It represents a CDU management switch. It is selected via the + 'discriminator: HWInventoryByLocationType' of HWInventoryByLocation + when HWInventoryByLocationType is 'HWInvByLocCDUMgmtSwitch'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + CDUMgmtSwitchLocationInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisLocationInfo' + type: object + HWInvByLocNode: + description: >- + This is a subtype of HWInventoryByLocation for HMSType Node. + It represents a service, compute, or system node. 
+ It is selected via the 'discriminator: HWInventoryByLocationType' + of HWInventoryByLocation when HWInventoryByLocationType is + 'HWInvByLocNode'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + NodeLocationInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishSystemLocationInfo' + Processors: + description: >- + Embedded Processor HWInv object array representing + subcomponents of that type (this is default for Nodes). + items: + $ref: '#/definitions/HWInvByLocProcessor' + readOnly: true + type: array + NodeAccels: + description: >- + Embedded NodeAccel HWInv object array representing + subcomponents of that type (this is default for Nodes). + items: + $ref: '#/definitions/HWInvByLocNodeAccel' + readOnly: true + type: array + Drives: + description: >- + Embedded Drives HWInv object array representing + subcomponents of that type (this is default for Nodes). + items: + $ref: '#/definitions/HWInvByLocDrive' + readOnly: true + type: array + Memory: + description: >- + Embedded Memory HWInv object array representing + subcomponents of that type (this is default for Nodes). + items: + $ref: '#/definitions/HWInvByLocMemory' + readOnly: true + type: array + NodeAccelRisers: + description: >- + Embedded NodeAccelRiser HWInv object array representing + subcomponents of that type (this is default for Nodes). + items: + $ref: '#/definitions/HWInvByLocNodeAccelRiser' + readOnly: true + type: array + NodeHsnNICs: + description: >- + Embedded NodeHsnNIC HWInv object array representing + subcomponents of that type (this is default for Nodes). + items: + $ref: '#/definitions/HWInvByLocHSNNIC' + readOnly: true + type: array + type: object + example: + ID: x0c0s0b0n0 + Type: Node + Ordinal: 0 + Status: Populated + HWInventoryByLocationType: HWInvByLocNode + NodeLocationInfo: + Id: System.Embedded.1 + Name: Name describing system or where it is located, per manufacturing + Description: Description of system/node type, per manufacturing + Hostname: if_defined_in_Redfish + ProcessorSummary: + Count: 2 + Model: Multi-Core Intel(R) Xeon(R) processor E5-16xx Series + MemorySummary: + TotalSystemMemoryGiB: 64 + PopulatedFRU: + FRUID: Dell-99999-1234.1234.2345 + Type: Node + Subtype: River + HWInventoryByFRUType: HWInvByFRUNode + NodeFRUInfo: + AssetTag: AdminAssignedAssetTag + BiosVersion: v1.0.2.9999 + Model: OKS0P2354 + Manufacturer: Dell + PartNumber: p99999 + SerialNumber: 1234.1234.2345 + SKU: as213234 + SystemType: Physical + UUID: 26276e2a-29dd-43eb-8ca6-8186bbc3d971 + Processors: + - ID: x0c0s0b0n0p0 + Type: Processor + Ordinal: 0 + Status: Populated + HWInventoryByLocationType: HWInvByLocProcessor + ProcessorLocationInfo: + Id: CPU1 + Name: Processor + Description: Socket 1 Processor + Socket: CPU 1 + PopulatedFRU: + FRUID: HOW-TO-ID-CPUS-FROM-REDFISH-IF-AT-ALL + Type: Processor + Subtype: SKL24 + HWInventoryByFRUType: HWInvByFRUProcessor + ProcessorFRUInfo: + InstructionSet: x86-64 + Manufacturer: Intel + MaxSpeedMHz: 2600 + Model: Intel(R) Xeon(R) CPU E5-2623 v4 @ 2.60GHz + ProcessorArchitecture: x86 + ProcessorId: + EffectiveFamily: 6 + EffectiveModel: 79 + IdentificationRegisters: 0x000406F1 + MicrocodeInfo: 0xB000017 + Step: 1 + VendorID: GenuineIntel + ProcessorType: CPU + TotalCores: 24 + TotalThreads: 48 + - ID: x0c0s0b0n0p1 + Type: Processor + Ordinal: 1 + Status: Populated + HWInventoryByLocationType: HWInvByLocProcessor + ProcessorLocationInfo: + Id: CPU2 + Name: Processor + Description: Socket 2 Processor + Socket: CPU 2 + 
PopulatedFRU: + FRUID: HOW-TO-ID-CPUS-FROM-REDFISH-IF-AT-ALL + Type: Processor + Subtype: SKL24 + HWInventoryByFRUType: HWInvByFRUProcessor + ProcessorFRUInfo: + InstructionSet: x86-64 + Manufacturer: Intel + MaxSpeedMHz: 2600 + Model: Intel(R) Xeon(R) CPU E5-2623 v4 @ 2.60GHz + ProcessorArchitecture: x86 + ProcessorId: + EffectiveFamily: 6 + EffectiveModel: 79 + IdentificationRegisters: 0x000406F1 + MicrocodeInfo: 0xB000017 + Step: 1 + VendorID: GenuineIntel + ProcessorType: CPU + TotalCores: 24 + TotalThreads: 48 + Memory: + - ID: x0c0s0b0n0d0 + Type: Memory + Ordinal: 0 + Status: Populated + HWInventoryByLocationType: HWInvByLocMemory + MemoryLocationInfo: + Id: DIMM1 + Name: DIMM Slot 1 + MemoryLocation: + Socket: 1 + MemoryController: 1 + Channel: 1 + Slot: 1 + PopulatedFRU: + FRUID: MFR-PARTNUMBER-SERIALNUMBER + Type: Memory + Subtype: DIMM2400G32 + HWInventoryByFRUType: HWInvByFRUMemory + MemoryFRUInfo: + BaseModuleType: RDIMM + BusWidthBits: 72 + CapacityMiB: 32768 + DataWidthBits: 64 + ErrorCorrection: MultiBitECC + Manufacturer: Micron + MemoryType: DRAM + MemoryDeviceType: DDR4 + OperatingSpeedMhz: 2400 + PartNumber: XYZ-123-1232 + RankCount: 2 + SerialNumber: sn12344567689 + - ID: x0c0s0b0n0d1 + Type: Memory + Ordinal: 1 + Status: Empty + HWInventoryByLocationType: HWInvByLocMemory + MemoryLocationInfo: + Id: DIMM2 + Name: Socket 1 DIMM Slot 2 + MemoryLocation: + Socket: 1 + MemoryController: 1 + Channel: 1 + Slot: 2 + PopulatedFRU: + - ID: x0c0s0b0n0d2 + Type: Memory + Ordinal: 2 + Status: Populated + HWInventoryByLocationType: HWInvByLocMemory + MemoryLocationInfo: + Id: DIMM3 + Name: Socket 2 DIMM Slot 1 + MemoryLocation: + Socket: 2 + MemoryController: 2 + Channel: 1 + Slot: 1 + PopulatedFRU: + FRUID: MFR-PARTNUMBER-SERIALNUMBER_2 + Type: Memory + Subtype: DIMM2400G32 + HWInventoryByFRUType: HWInvByFRUMemory + MemoryFRUInfo: + BaseModuleType: RDIMM + BusWidthBits: 72 + CapacityMiB: 32768 + DataWidthBits: 64 + ErrorCorrection: MultiBitECC + Manufacturer: Micron + MemoryType: DRAM + MemoryDeviceType: DDR4 + OperatingSpeedMhz: 2400 + PartNumber: XYZ-123-1232 + RankCount: 2 + SerialNumber: k346456346346 + - ID: x0c0s0b0n0d3 + Type: Memory + Ordinal: 3 + Status: Empty + HWInventoryByLocationType: HWInvByLocMemory + MemoryLocationInfo: + Id: DIMM3 + Name: Socket 2 DIMM Slot 2 + MemoryLocation: + Socket: 2 + MemoryController: 2 + Channel: 1 + Slot: 2 + PopulatedFRU: + HWInvByLocProcessor: + description: >- + This is a subtype of HWInventoryByLocation for HMSType Processor. + It represents a primary CPU type (e.g. non-accelerator). + It is selected via the 'discriminator: HWInventoryByLocationType' + of HWInventoryByLocation when HWInventoryByLocationType is + 'HWInvByLocProcessor'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + ProcessorLocationInfo: + $ref: + '#/definitions/HWInventory.1.0.0_RedfishProcessorLocationInfo' + type: object + example: + description: + By default, listed as subcomponent of Node, see example there. + HWInvByLocNodeAccel: + description: >- + This is a subtype of HWInventoryByLocation for HMSType NodeAccel. + It represents a GPU type (e.g. accelerator). + It is selected via the 'discriminator: HWInventoryByLocationType' + of HWInventoryByLocation when HWInventoryByLocationType is + 'HWInvByLocNodeAccel'. 
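+    # Editorial note: with the default NestNodesOnly format (see
+    # HWInventory.1.0.0_HWInventory above), these entries appear nested in
+    # the parent node's NodeAccels array rather than in a top-level list.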
+ allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + NodeAccelLocationInfo: + $ref: + '#/definitions/HWInventory.1.0.0_RedfishProcessorLocationInfo' + type: object + example: + description: + By default, listed as subcomponent of Node. + HWInvByLocDrive: + description: >- + This is a subtype of HWInventoryByLocation for HMSType Drive. + It represents a disk drive. + It is selected via the 'discriminator: HWInventoryByLocationType' + of HWInventoryByLocation when HWInventoryByLocationType is + 'HWInvByLocDrive'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + DriveLocationInfo: + $ref: + '#/definitions/HWInventory.1.0.0_RedfishDriveLocationInfo' + type: object + example: + description: + By default, listed as subcomponent of Node, see example there. + HWInvByLocMemory: + description: >- + This is a subtype of HWInventoryByLocation for HMSType Memory. + It represents a DIMM or other memory module type. + It is selected via the 'discriminator: HWInventoryByLocationType' + of HWInventoryByLocation when HWInventoryByLocationType is + 'HWInvByLocMemory'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + MemoryLocationInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishMemoryLocationInfo' + type: object + example: + description: + By default, listed as subcomponent of Node, see example there. + HWInvByLocPDU: + description: >- + This is a subtype of HWInventoryByLocation for HMSType CabinetPDU. + It represents a master or slave PowerDistribution aka PDU component. + It is selected via the 'discriminator: HWInventoryByLocationType' + of HWInventoryByLocation when HWInventoryByLocationType is + 'HWInvByLocPDU'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + PDULocationInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishPDULocationInfo' + CabinetPDUPowerConnectors: + description: >- + Embedded Outlets HWInv object array representing + outlets of this PDU. 
+            items:
+              $ref: '#/definitions/HWInvByLocOutlet'
+            readOnly: true
+            type: array
+        type: object
+    example:
+      ID: x0m0p0
+      Type: CabinetPDU
+      Ordinal: 0
+      Status: Populated
+      HWInventoryByLocationType: HWInvByLocPDU
+      PDULocationInfo:
+        Id: "1"
+        Name: RackPDU1
+        Description: Description of PDU, per manufacturing
+        UUID: 32354641-4135-4332-4a35-313735303734
+      PopulatedFRU:
+        FRUID: "CabinetPDU.29347ZT536"
+        Type: CabinetPDU
+        HWInventoryByFRUType: HWInvByFRUPDU
+        PDUFRUInfo:
+          FirmwareVersion: 4.3.0
+          EquipmentType: RackPDU
+          Manufacturer: Contoso
+          CircuitSummary:
+            TotalPhases: 3
+            TotalBranches: 4
+            TotalOutlets: 16
+            MonitoredPhases: 3
+            ControlledOutlets: 8
+            MonitoredBranches: 4
+            MonitoredOutlets: 12
+          AssetTag: PDX-92381
+          DateOfManufacture: 2017-01-11T08:00:00Z
+          HardwareRevision: 1.03b
+          Model: ZAP4000
+          SerialNumber: 29347ZT536
+          PartNumber: AA-23
+      CabinetPDUPowerConnectors:
+        - ID: x0m0p0v1
+          Type: CabinetPDUPowerConnector
+          Ordinal: 0
+          Status: Populated
+          HWInventoryByLocationType: HWInvByLocOutlet
+          OutletLocationInfo:
+            Id: A1
+            Name: Outlet A1, Branch Circuit A
+            Description: Outlet description
+          PopulatedFRU:
+            FRUID: "CabinetPDUPowerConnector.0.CabinetPDU.29347ZT536"
+            Type: CabinetPDUPowerConnector
+            HWInventoryByFRUType: HWInvByFRUOutlet
+            OutletFRUInfo:
+              PowerEnabled: true
+              NominalVoltage: AC120V
+              RatedCurrentAmps: 20
+              VoltageType: AC
+              OutletType: NEMA_5_20R
+              PhaseWiringType: OnePhase3Wire
+        - ID: x0m0p0v2
+          Type: CabinetPDUPowerConnector
+          Ordinal: 2
+          Status: Populated
+          HWInventoryByLocationType: HWInvByLocOutlet
+          OutletLocationInfo:
+            Id: A2
+            Name: Outlet A2, Branch Circuit A
+            Description: Outlet description
+          PopulatedFRU:
+            FRUID: "CabinetPDUPowerConnector.1.CabinetPDU.29347ZT536"
+            Type: CabinetPDUPowerConnector
+            HWInventoryByFRUType: HWInvByFRUOutlet
+            OutletFRUInfo:
+              PowerEnabled: true
+              NominalVoltage: AC120V
+              RatedCurrentAmps: 20
+              VoltageType: AC
+              OutletType: NEMA_5_20R
+              PhaseWiringType: OnePhase3Wire
+  HWInvByLocOutlet:
+    description: >-
+      This is a subtype of HWInventoryByLocation for HMSType
+      CabinetPDUPowerConnector.  It is an outlet that is a child of a
+      parent master or slave PDU.
+      It is selected via the 'discriminator: HWInventoryByLocationType'
+      of HWInventoryByLocation when HWInventoryByLocationType is
+      'HWInvByLocOutlet'.
+    allOf:
+      - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation'
+      - type: object
+        properties:
+          OutletLocationInfo:
+            $ref: '#/definitions/HWInventory.1.0.0_RedfishOutletLocationInfo'
+        type: object
+    example:
+      description:
+        By default, listed as subcomponent of PDU, see example there.
+  HWInvByLocCMMRectifier:
+    description: >-
+      This is a subtype of HWInventoryByLocation for HMSType CMMRectifier.
+      It represents a power supply.
+      It is selected via the 'discriminator: HWInventoryByLocationType'
+      of HWInventoryByLocation when HWInventoryByLocationType is
+      'HWInvByLocCMMRectifier'.
+    allOf:
+      - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation'
+      - type: object
+        properties:
+          CMMRectifierLocationInfo:
+            $ref:
+              '#/definitions/HWInventory.1.0.0_RedfishCMMRectifierLocationInfo'
+        type: object
+  HWInvByLocNodeAccelRiser:
+    description: >-
+      This is a subtype of HWInventoryByLocation for HMSType NodeAccelRiser.
+      It represents a GPUSubsystem baseboard.
+      It is selected via the 'discriminator: HWInventoryByLocationType'
+      of HWInventoryByLocation when HWInventoryByLocationType is
+      'HWInvByLocNodeAccelRiser'.
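+    # Editorial note: this corresponds to a Redfish Assembly with a
+    # PhysicalContext of GPUSubsystem; see
+    # HWInventory.1.0.0_RedfishNodeAccelRiserLocationInfo below.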
+ allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + NodeAccelRiserLocationInfo: + $ref: + '#/definitions/HWInventory.1.0.0_RedfishNodeAccelRiserLocationInfo' + type: object + HWInvByLocNodeEnclosurePowerSupply: + description: >- + This is a subtype of HWInventoryByLocation for HMSType NodeEnclosurePowerSupply. + It represents a power supply. + It is selected via the 'discriminator: HWInventoryByLocationType' + of HWInventoryByLocation when HWInventoryByLocationType is + 'HWInvByLocNodeEnclosurePowerSupply'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + NodeEnclosurePowerSupplyLocationInfo: + $ref: + '#/definitions/HWInventory.1.0.0_RedfishNodeEnclosurePowerSupplyLocationInfo' + type: object + HWInvByLocNodeBMC: + description: >- + This is a subtype of HWInventoryByLocation for HMSType NodeBMC. + It represents a NodeBMC. + It is selected via the 'discriminator: HWInventoryByLocationType' + of HWInventoryByLocation when HWInventoryByLocationType is + 'HWInvByLocNodeBMC'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + NodeBMCLocationInfo: + $ref: + '#/definitions/HWInventory.1.0.0_RedfishManagerLocationInfo' + type: object + HWInvByLocRouterBMC: + description: >- + This is a subtype of HWInventoryByLocation for HMSType RouterBMC. + It represents a RouterBMC. + It is selected via the 'discriminator: HWInventoryByLocationType' + of HWInventoryByLocation when HWInventoryByLocationType is + 'HWInvByLocRouterBMC'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + RouterBMCLocationInfo: + $ref: + '#/definitions/HWInventory.1.0.0_RedfishManagerLocationInfo' + type: object + HWInvByLocHSNNIC: + description: >- + This is a subtype of HWInventoryByLocation for HMSType NodeHSNNIC. + It represents a NodeHSNNIC. + It is selected via the 'discriminator: HWInventoryByLocationType' + of HWInventoryByLocation when HWInventoryByLocationType is + 'HWInvByLocHSNNIC'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByLocation' + - type: object + properties: + HSNNICLocationInfo: + $ref: + '#/definitions/HWInventory.1.0.0_HSNNICLocationInfo' + type: object + HWInventory.1.0.0_RedfishChassisLocationInfo: + description: >- + These are pass-through properties of the Redfish Chassis object type + that are also used in HMS inventory data. They will be mostly + informational as exactly how fields are set depends on how + the particular implementation does things, but will be useful for + servicing. + properties: + Id: + description: >- + This is a pass-through of the Redfish value of the same name. + The Id is included for informational purposes. The RedfishEndpoint + objects are intended to help locate and interact with HMS components + via the Redfish endpoint, so this is mostly needed in case + servicing the component requires its ID/name according to a + particular COTS manufacturer's naming scheme within, for example, + a particular server enclosure. + type: string + readOnly: true + Name: + description: >- + This is a pass-through of the Redfish value of the same name. + This is included for informational purposes as the naming will + likely vary from manufacturer-to-manufacturer, but should help + match items up to manufacturer's documentation if the normalized + HMS naming scheme is too vague for some COTS systems. 
+ type: string + readOnly: true + Description: + description: >- + This is a pass-through of the Redfish value of the same name. + This is an informational description set by the BMC implementation. + type: string + readOnly: true + Hostname: + description: >- + This is a pass-through of the Redfish value of the same name. + Note this is simply what (if anything) Redfish has been told + the hostname is. It isn't necessarily its hostname on any particular + network interface (e.g. the HMS management network). + type: string + readOnly: true + type: object + HWInventory.1.0.0_RedfishSystemLocationInfo: + description: >- + These are pass-through properties of the Redfish ComputerSystem object + that are also used in HMS inventory data. They will be mostly + informational as exactly how fields are set depends on how + the particular implementation does things, but will be useful for + servicing. + properties: + Id: + description: >- + This is a pass-through of the Redfish value of the same name. + The Id is included for informational purposes. The RedfishEndpoint + objects are intended to help locate and interact with HMS components + via the Redfish endpoint, so this is mostly needed in case + servicing the component requires its ID/name according to a + particular COTS manufacturer's naming scheme within, for example, + a particular server enclosure. + type: string + readOnly: true + Name: + description: >- + This is a pass-through of the Redfish value of the same name. + This is included for informational purposes as the naming will + likely vary from manufacturer-to-manufacturer, but should help + match items up to manufacturer's documentation if the normalized + HMS naming scheme is too vague for some COTS systems. + type: string + readOnly: true + Description: + description: >- + This is a pass-through of the Redfish value of the same name. + This is an informational description set by the BMC implementation. + type: string + readOnly: true + Hostname: + description: >- + This is a pass-through of the Redfish value of the same name. + Note this is simply what (if anything) Redfish has been told + the hostname is. It isn't necessarily its hostname on any particular + network interface (e.g. the HMS management network). + type: string + readOnly: true + ProcessorSummary: + description: >- + This is a summary of the installed processors, if any. + It is taken from ComputerSystem.1.0.0_ProcessorSummary. + properties: + Count: + description: The number of processors in the system. + minimum: 0 + readOnly: true + type: number + Model: + description: >- + The processor model for the primary or majority of processors in + this system. + readOnly: true + type: string + type: object + readOnly: true + MemorySummary: + description: >- + This object describes the memory of the system in general detail. + It is taken from ComputerSystem.1.0.0_MemorySummary. + properties: + TotalSystemMemoryGiB: + description: >- + The total installed, operating system-accessible memory (RAM), + measured in GiB. + minimum: 0 + readOnly: true + type: number + type: object + readOnly: true + type: object + HWInventory.1.0.0_RedfishProcessorLocationInfo: + description: >- + These are pass-through properties of the Redfish Processor object type + that are also used in HMS inventory data. They will be mostly + informational as exactly how fields are set depends on how + the particular implementation does things, but will be useful for + servicing. 
+ properties: + Id: + description: >- + This is a pass-through of the Redfish value of the same name. + The Id is included for informational purposes. The RedfishEndpoint + objects are intended to help locate and interact with HMS components + via the Redfish endpoint, so this is mostly needed in case + servicing the component requires its ID/name according to a + particular COTS manufacturer's naming scheme within, for example, + a particular server enclosure. + type: string + readOnly: true + Name: + description: >- + This is a pass-through of the Redfish value of the same name. + This is included for informational purposes as the naming will + likely vary from manufacturer-to-manufacturer, but should help + match items up to manufacturer's documentation if the normalized + HMS naming scheme is too vague for some COTS systems. + type: string + readOnly: true + Description: + description: >- + This is a pass-through of the Redfish value of the same name. + This is an informational description set by the BMC implementation. + type: string + readOnly: true + Socket: + description: >- + This is a pass-through of the Redfish value of the same name. + It represents the socket or location of the processor, and may + differ from the normalized HMS Ordinal value (or xname) that is + always indexed from 0. Manufacturers may or may not use zero + indexing (or may have some other naming scheme for sockets) and + so we retain this information to resolve any ambiguity when + servicing the component. + readOnly: true + type: string + type: object + HWInventory.1.0.0_RedfishDriveLocationInfo: + description: >- + These are pass-through properties of the Redfish Drive object type + that are also used in HMS inventory data. They will be mostly + informational as exactly how fields are set depends on how + the particular implementation does things, but will be useful for + servicing. + properties: + Id: + description: >- + This is a pass-through of the Redfish value of the same name. + The Id is included for informational purposes. The RedfishEndpoint + objects are intended to help locate and interact with HMS components + via the Redfish endpoint, so this is mostly needed in case + servicing the component requires its ID/name according to a + particular COTS manufacturer's naming scheme within, for example, + a particular server enclosure. + type: string + readOnly: true + Name: + description: >- + This is a pass-through of the Redfish value of the same name. + This is included for informational purposes as the naming will + likely vary from manufacturer-to-manufacturer, but should help + match items up to manufacturer's documentation if the normalized + HMS naming scheme is too vague for some COTS systems. + type: string + readOnly: true + Description: + description: >- + This is a pass-through of the Redfish value of the same name. + This is an informational description set by the BMC implementation. + type: string + readOnly: true + type: object + HWInventory.1.0.0_RedfishMemoryLocationInfo: + description: >- + These are pass-through properties of the Redfish Memory object type + that are also used in HMS inventory data. They will be mostly + informational as exactly how fields are set depends on how + the particular implementation does things, but will be useful for + servicing. + properties: + Id: + description: >- + This is a pass-through of the Redfish value of the same name. + The Id is included for informational purposes. 
The RedfishEndpoint
+          objects are intended to help locate and interact with HMS components
+          via the Redfish endpoint, so this is mostly needed in case
+          servicing the component requires its ID/name according to a
+          particular COTS manufacturer's naming scheme within, for example,
+          a particular server enclosure.
+        type: string
+        readOnly: true
+      Name:
+        description: >-
+          This is a pass-through of the Redfish value of the same name.
+          This is included for informational purposes as the naming will
+          likely vary from manufacturer-to-manufacturer, but should help
+          match items up to manufacturer's documentation if the normalized
+          HMS naming scheme is too vague for some COTS systems.
+        type: string
+        readOnly: true
+      Description:
+        description: >-
+          This is a pass-through of the Redfish value of the same name.
+          This is an informational description set by the BMC implementation.
+        type: string
+        readOnly: true
+      MemoryLocation:
+        description: >-
+          Describes the location of the memory module.  Note that the
+          indexing of these fields is set by the manufacturer and may
+          not start at zero (or one for that matter), so these values are
+          for informational/servicing purposes only.
+          This object and its fields are again a pass-through from Redfish.
+        properties:
+          Socket:
+            description: >-
+              Socket number (numbering may vary by manufacturer).
+            minimum: 0
+            readOnly: true
+            type: number
+          MemoryController:
+            description: >-
+              Memory controller number (numbering may vary by manufacturer).
+            minimum: 0
+            readOnly: true
+            type: number
+          Channel:
+            description: >-
+              Channel number (numbering may vary by manufacturer).
+            minimum: 0
+            readOnly: true
+            type: number
+          Slot:
+            description: >-
+              Slot number (numbering may vary by manufacturer).
+            minimum: 0
+            readOnly: true
+            type: number
+        type: object
+    type: object
+  HWInventory.1.0.0_RedfishPDULocationInfo:
+    description: >-
+      These are pass-through properties of the Redfish PowerDistribution object
+      type that are also used in HMS inventory data.  They will be mostly
+      informational as exactly how fields are set depends on how
+      the particular implementation does things, but will be useful for
+      servicing.
+    properties:
+      Id:
+        description: >-
+          This is a pass-through of the Redfish value of the same name.
+          The Id is included for informational purposes.  The RedfishEndpoint
+          objects are intended to help locate and interact with HMS components
+          via the Redfish endpoint, so this is mostly needed in case
+          servicing the component requires its ID/name according to a
+          particular COTS manufacturer's naming scheme within, for example,
+          a particular server enclosure.
+        type: string
+        readOnly: true
+      Name:
+        description: >-
+          This is a pass-through of the Redfish value of the same name.
+          This is included for informational purposes as the naming will
+          likely vary from manufacturer-to-manufacturer, but should help
+          match items up to manufacturer's documentation if the normalized
+          HMS naming scheme is too vague for some COTS systems.
+        type: string
+        readOnly: true
+      Description:
+        description: >-
+          This is a pass-through of the Redfish value of the same name.
+          This is an informational description set by the implementation.
+        type: string
+        readOnly: true
+      UUID:
+        description: >-
+          This is a pass-through of the Redfish value of the same name.
+        type: string
+        readOnly: true
+    type: object
+  HWInventory.1.0.0_RedfishOutletLocationInfo:
+    description: >-
+      These are pass-through properties of the Redfish PDU Outlet object
+      type that are also used in HMS inventory data.  They will be mostly
+      informational as exactly how fields are set depends on how
+      the particular implementation does things, but will be useful for
+      servicing.
+    properties:
+      Id:
+        description: >-
+          This is a pass-through of the Redfish value of the same name.
+          The Id is included for informational purposes.  The RedfishEndpoint
+          objects are intended to help locate and interact with HMS components
+          via the Redfish endpoint, so this is mostly needed in case
+          servicing the component requires its ID/name according to a
+          particular COTS manufacturer's naming scheme within, for example,
+          a particular server enclosure.
+        type: string
+        readOnly: true
+      Name:
+        description: >-
+          This is a pass-through of the Redfish value of the same name.
+          This is included for informational purposes as the naming will
+          likely vary from manufacturer-to-manufacturer, but should help
+          match items up to manufacturer's documentation if the normalized
+          HMS naming scheme is too vague for some COTS systems.
+        type: string
+        readOnly: true
+      Description:
+        description: >-
+          This is a pass-through of the Redfish value of the same name.
+          This is an informational description set by the implementation.
+        type: string
+        readOnly: true
+    type: object
+  HWInventory.1.0.0_RedfishCMMRectifierLocationInfo:
+    description: >-
+      These are pass-through properties of the Redfish Power Supply object type
+      that are also used in HMS inventory data.  They will be mostly
+      informational as exactly how fields are set depends on how
+      the particular implementation does things, but will be useful for
+      servicing.
+    properties:
+      Name:
+        description: >-
+          This is a pass-through of the Redfish value of the same name.
+          This is included for informational purposes as the naming will
+          likely vary from manufacturer-to-manufacturer, but should help
+          match items up to manufacturer's documentation if the normalized
+          HMS naming scheme is too vague for some COTS systems.
+        type: string
+        readOnly: true
+      FirmwareVersion:
+        description: >-
+          This is a pass-through of the Redfish value of the same name.
+        type: string
+        readOnly: true
+    type: object
+  HWInventory.1.0.0_RedfishNodeAccelRiserLocationInfo:
+    description: >-
+      These are the properties of the NodeAccelRiser type
+      that are passed-through to the HMS inventory data when the underlying
+      Redfish object type is an Assembly with a PhysicalContext of
+      GPUSubsystem.  These are the properties of a specific hardware
+      instance/FRU that may change if the component is relocated within
+      the system.  Child of a Chassis.
+    properties:
+      Name:
+        description: >-
+          This is a pass-through of the Redfish value of the same name.
+          This is included for informational purposes as the naming will
+          likely vary from manufacturer-to-manufacturer, but should help
+          match items up to manufacturer's documentation if the normalized
+          HMS naming scheme is too vague for some COTS systems.
+        type: string
+        readOnly: true
+      Description:
+        description: >-
+          This is a pass-through of the Redfish value of the same name.
+        type: string
+        readOnly: true
+    type: object
+  HWInventory.1.0.0_RedfishNodeEnclosurePowerSupplyLocationInfo:
+    description: >-
+      These are pass-through properties of the Redfish Power Supply object type
+      that are also used in HMS inventory data.  They will be mostly
They will be mostly + informational as exactly how fields are set depends on how + the particular implementation does things, but will be useful for + servicing. + properties: + Name: + description: >- + This is a pass-through of the Redfish value of the same name. + This is included for informational purposes as the naming will + likely vary from manufacturer-to-manufacturer, but should help + match items up to manufacturer's documentation if the normalized + HMS naming scheme is too vague for some COTS systems. + type: string + readOnly: true + FirmwareVersion: + description: >- + This is a pass-through of the Redfish value of the same name. + type: string + readOnly: true + type: object + HWInventory.1.0.0_RedfishManagerLocationInfo: + description: >- + These are pass-through properties of the Redfish Manager object type + that are also used in HMS inventory data. They will be mostly + informational as exactly how fields are set depends on how + the particular implementation does things, but will be useful for + servicing. + properties: + DateTime: + description: >- + This is a pass-through of the Redfish value of the same name. + The current date and time with UTC offset that the manager uses to set or read time. + type: string + readOnly: true + DateTimeLocalOffset: + description: >- + This is a pass-through of the Redfish value of the same name. + The time offset from UTC that the DateTime property is in +HH:MM format. + type: string + readOnly: true + Description: + description: >- + This is a pass-through of the Redfish value of the same name. + This is an informational description set by the implementation. + type: string + readOnly: true + FirmwareVersion: + description: >- + This is a pass-through of the Redfish value of the same name. + type: string + readOnly: true + Id: + description: >- + This is a pass-through of the Redfish value of the same name. + The Id is included for informational purposes. The RedfishEndpoint + objects are intended to help locate and interact with HMS components + via the Redfish endpoint, so this is mostly needed in case + servicing the component requires its ID/name according to a + particular COTS manufacturers naming scheme within, for example, + a particular server enclosure. + type: string + readOnly: true + Name: + description: >- + This is a pass-through of the Redfish value of the same name. + This is included for informational purposes as the naming will + likely vary from manufacturer-to-manufacturer, but should help + match items up to manufacturer's documentation if the normalized + HMS naming scheme is too vague for some COTS systems. + type: string + readOnly: true + type: object + HWInventory.1.0.0_HSNNICLocationInfo: + description: >- + These are pass-through properties of the Node HSN NIC object type + that are also used in HMS inventory data. They will be mostly + informational as exactly how fields are set depends on how + the particular implementation does things, but will be useful for + servicing. + properties: + Description: + description: >- + This is a pass-through of the Redfish value of the same name. + type: string + Id: + description: >- + This is a pass-through of the Redfish value of the same name. + type: string + Name: + description: >- + This is a pass-through of the Redfish value of the same name. + type: string + type: object + # + # Hardware Inventory by FRU - This is the device-specific attributes that + # are specific to an individual piece of hardware, regardless of its + # current location, if any. 
+ # + HWInventory.1.0.0_HWInventoryByFRU: + description: >- + This represents a physical piece of hardware with properties specific + to a unique component in the system. It is the counterpart to + HWInventoryByLocation (which contains ONLY information specific + to a particular location in the system that may or may not be populated), + in that it contains only info about the component that is durably + consistent wherever the component is installed in the system (if it + is still installed at all). + properties: + FRUID: + # The FRU identifier + $ref: '#/definitions/FRUId.1.0.0' + Type: + # The HMS Type of the FRU + $ref: '#/definitions/HMSType.1.0.0' + FRUSubtype: + description: TBD. + type: string + HWInventoryByFRUType: + description: >- + This is used as a discriminator to determine the additional HMS-type + specific subtype that is returned. + enum: + - HWInvByFRUCabinet + - HWInvByFRUChassis + - HWInvByFRUComputeModule + - HWInvByFRURouterModule + - HWInvByFRUNodeEnclosure + - HWInvByFRUHSNBoard + - HWInvByFRUMgmtSwitch + - HWInvByFRUMgmtHLSwitch + - HWInvByFRUCDUMgmtSwitch + - HWInvByFRUNode + - HWInvByFRUProcessor + - HWInvByFRUNodeAccel + - HWInvByFRUNodeAccelRiser + - HWInvByFRUDrive + - HWInvByFRUMemory + - HWInvByFRUPDU + - HWInvByFRUOutlet + - HWInvByFRUCMMRectifier + - HWInvByFRUNodeEnclosurePowerSupply + - HWInvByFRUNodeBMC + - HWInvByFRURouterBMC + - HWInvByFRUHSNNIC + type: string + type: object + discriminator: HWInventoryByFRUType + required: + - HWInventoryByFRUType + example: + FRUID: Dell-99999-1234-1234-2345 + Type: Node + Subtype: River + HWInventoryByFRUType: HWInvByFRUNode + NodeFRUInfo: + AssetTag: AdminAssignedAssetTag + BiosVersion: v1.0.2.9999 + Model: OKS0P2354 + Manufacturer: Dell + PartNumber: y99999 + SerialNumber: 1234-1234-2345 + SKU: as213234 + SystemType: Physical + UUID: 26276e2a-29dd-43eb-8ca6-8186bbc3d971 + HWInvByFRUCabinet: + description: >- + This is a subtype of HWInventoryByFRU for HMSType Cabinet. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUCabinet'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + CabinetFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisFRUInfo' + type: object + HWInvByFRUChassis: + description: >- + This is a subtype of HWInventoryByFRU for HMSType Chassis. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUChassis'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + ChassisFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisFRUInfo' + type: object + HWInvByFRUComputeModule: + description: >- + This is a subtype of HWInventoryByFRU for HMSType ComputeModule. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUComputeModule'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + ComputeModuleFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisFRUInfo' + type: object + HWInvByFRURouterModule: + description: >- + This is a subtype of HWInventoryByFRU for HMSType RouterModule. + This is a Mountain switch module. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRURouterModule'.
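+      # Illustrative only, not part of the schema: a client consuming this
+      # API reads HWInventoryByFRUType to pick the concrete subtype, then
+      # reads the matching <Type>FRUInfo property. Hypothetical payload:
+      #   {
+      #     "FRUID": "Vendor-12345-6789",
+      #     "Type": "RouterModule",
+      #     "HWInventoryByFRUType": "HWInvByFRURouterModule",
+      #     "RouterModuleFRUInfo": { "Manufacturer": "...", "SerialNumber": "..." }
+      #   }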
+ allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + RouterModuleFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisFRUInfo' + type: object + HWInvByFRUNodeEnclosure: + description: >- + This is a subtype of HWInventoryByFRU for HMSType NodeEnclosure. + It represents a Mountain node card or River rack enclosure. It is + NOT the BMC, which is separate and corresponds to a Redfish Manager. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUNodeEnclosure'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + NodeEnclosureFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisFRUInfo' + HWInvByFRUHSNBoard: + description: >- + This is a subtype of HWInventoryByFRU for HMSType HSNBoard. + It represents a Mountain switch card or River TOR enclosure. It is + NOT the BMC, which is separate and corresponds to a Redfish Manager. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUHSNBoard'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + HSNBoardFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisFRUInfo' + type: object + HWInvByFRUMgmtSwitch: + description: >- + This is a subtype of HWInventoryByFRU for HMSType MgmtSwitch. + It represents a management switch. It is selected via the + 'discriminator: HWInventoryByFRUType' of HWInventoryByFRU when + HWInventoryByFRUType is 'HWInvByFRUMgmtSwitch'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + MgmtSwitchFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisFRUInfo' + type: object + HWInvByFRUMgmtHLSwitch: + description: >- + This is a subtype of HWInventoryByFRU for HMSType MgmtHLSwitch. + It represents a high level management switch. It is selected via the + 'discriminator: HWInventoryByFRUType' of HWInventoryByFRU when + HWInventoryByFRUType is 'HWInvByFRUMgmtHLSwitch'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + MgmtHLSwitchFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisFRUInfo' + type: object + HWInvByFRUCDUMgmtSwitch: + description: >- + This is a subtype of HWInventoryByFRU for HMSType CDUMgmtSwitch. + It represents a CDU management switch. It is selected via the + 'discriminator: HWInventoryByFRUType' of HWInventoryByFRU when + HWInventoryByFRUType is 'HWInvByFRUCDUMgmtSwitch'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + CDUMgmtSwitchFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishChassisFRUInfo' + type: object + HWInvByFRUNode: + description: >- + This is a subtype of HWInventoryByFRU for HMSType Node. + It represents a service, compute, or system node. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUNode'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + NodeFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishSystemFRUInfo' + type: object + HWInvByFRUProcessor: + description: >- + This is a subtype of HWInventoryByFRU for HMSType Processor. + It represents a primary CPU type (e.g. non-accelerator). 
+ It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUProcessor'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + ProcessorFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishProcessorFRUInfo' + type: object + example: + FRUID: HOW-TO-ID-CPUS-FROM-REDFISH-IF-AT-ALL + Type: Processor + Subtype: SKL24 + HWInventoryByFRUType: HWInvByFRUProcessor + ProcessorFRUInfo: + InstructionSet: x86-64 + Manufacturer: Intel + MaxSpeedMHz: 2600 + Model: Intel(R) Xeon(R) CPU E5-2623 v4 @ 2.60GHz + ProcessorArchitecture: x86 + ProcessorId: + EffectiveFamily: 6 + EffectiveModel: 79 + IdentificationRegisters: 0x000406F1 + MicrocodeInfo: 0xB000017 + Step: 1 + VendorID: GenuineIntel + ProcessorType: CPU + TotalCores: 24 + TotalThreads: 48 + HWInvByFRUNodeAccel: + description: >- + This is a subtype of HWInventoryByFRU for HMSType NodeAccel. + It represents a GPU type (e.g. accelerator). + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUNodeAccel'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + NodeAccelFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishProcessorFRUInfo' + HWInvByFRUDrive: + description: >- + This is a subtype of HWInventoryByFRU for HMSType Drive. + It represents a disk drive type. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUDrive'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + DriveFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishDriveFRUInfo' + type: object + example: + HWInventoryByFRUType: HWInvByFRUDrive + DriveFRUInfo: + SerialNumber: S45PNA0M540940 + Model: SAMSUNG MZ7LH480HAHQ-00005 + CapacityBytes: 503424483328 + FailurePredicted: false + HWInvByFRUMemory: + description: >- + This is a subtype of HWInventoryByFRU for HMSType Memory. + It represents a DIMM or other memory module type. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUMemory'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + MemoryFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishMemoryFRUInfo' + type: object + HWInvByFRUPDU: + description: >- + This is a subtype of HWInventoryByFRU for PDU HMSTypes, e.g. CabinetPDU. + It represents a Redfish PowerDistribution master or slave PDU. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUPDU'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + PDUFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishPDUFRUInfo' + type: object + HWInvByFRUOutlet: + description: >- + This is a subtype of HWInventoryByFRU for Outlet HMSTypes, e.g. + CabinetPDUPowerConnector. It represents an outlet of a PDU. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUOutlet'.
+ allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + OutletFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishOutletFRUInfo' + type: object + example: + ID: x0m0p0v1 + Type: CabinetPDUPowerConnector + Ordinal: 0 + Status: Populated + HWInventoryByLocationType: HWInvByLocOutlet + OutletLocationInfo: + Id: A1 + Name: Outlet A1, Branch Circuit A + Description: Outlet description + PopulatedFRU: + FRUID: "CabinetPDUPowerConnector.0.CabinetPDU.29347ZT536" + Type: CabinetPDUPowerConnector + HWInventoryByFRUType: HWInvByFRUOutlet + OutletFRUInfo: + PowerEnabled: true + NominalVoltage: AC120V + RatedCurrentAmps: 20 + VoltageType: AC + OutletType: NEMA_5_20R + PhaseWiringType: OnePhase3Wire + HWInvByFRUCMMRectifier: + description: >- + This is a subtype of HWInventoryByFRU for HMSType CMMRectifier. + It represents a power supply type. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUCMMRectifier'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + PowerSupplyFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishCMMRectifierFRUInfo' + type: object + HWInvByFRUNodeAccelRiser: + description: >- + This is a subtype of HWInventoryByFRU for HMSType NodeAccelRiser. + It represents a GPUSubsystem baseboard type. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUNodeAccelRiser'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + NodeAccelRiserFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishNodeAccelRiserFRUInfo' + type: object + HWInvByFRUNodeEnclosurePowerSupply: + description: >- + This is a subtype of HWInventoryByFRU for HMSType NodeEnclosurePowerSupply. + It represents a power supply type. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUNodeEnclosurePowerSupply'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + NodeEnclosurePowerSupplyFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishNodeEnclosurePowerSupplyFRUInfo' + type: object + HWInvByFRUNodeBMC: + description: >- + This is a subtype of HWInventoryByFRU for HMSType NodeBMC. + It represents a Node BMC type. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUNodeBMC'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + NodeBMCFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishManagerFRUInfo' + type: object + HWInvByFRURouterBMC: + description: >- + This is a subtype of HWInventoryByFRU for HMSType RouterBMC. + It represents a Router BMC type. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRURouterBMC'. + allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + RouterBMCFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_RedfishManagerFRUInfo' + type: object + HWInvByFRUHSNNIC: + description: >- + This is a subtype of HWInventoryByFRU for HMSType NodeHsnNic. + It represents a node HSN NIC type. + It is selected via the 'discriminator: HWInventoryByFRUType' + of HWInventoryByFRU when HWInventoryByFRUType is + 'HWInvByFRUHSNNIC'.
+ allOf: + - $ref: '#/definitions/HWInventory.1.0.0_HWInventoryByFRU' + - type: object + properties: + HSNNICFRUInfo: + $ref: '#/definitions/HWInventory.1.0.0_HSNNICFRUInfo' + type: object + HWInventory.1.0.0_RedfishChassisFRUInfo: + description: >- + These are pass-through properties of the Redfish Chassis object type + that are also used in HMS inventory data when this is the underlying + Redfish object type for a particular HMS component type. These are + properties of a specific hardware instance/FRU that remain the same if + the component is relocated within the system. + properties: + AssetTag: + description: The administratively-assigned asset tag for this chassis. + type: string + readOnly: true + ChassisType: + description: >- + This property indicates the type of physical form factor of this + resource (from Redfish - not all of these will likely appear + in practice. In any case, the HMS type and subtype will + identify the hardware type, this is for informational purposes + only). + enum: + - Rack + - Blade + - Enclosure + - StandAlone + - RackMount + - Card + - Cartridge + - Row + - Pod + - Expansion + - Sidecar + - Zone + - Sled + - Shelf + - Drawer + - Module + - Component + - Other + readOnly: true + type: string + Model: + description: Manufacturer-provided model number for part. + type: string + readOnly: true + Manufacturer: + description: Intended to provide the manufacturer of the part. + type: string + readOnly: true + PartNumber: + description: Manufacturer-provided part number for this component. + type: string + readOnly: true + SerialNumber: + description: Manufacturer-provided serial number for this component. + type: string + readOnly: true + SKU: + description: Manufacturer-provided SKU for this component. + type: string + readOnly: true + type: object + HWInventory.1.0.0_RedfishSystemFRUInfo: + description: >- + These are pass-through properties of the Redfish ComputerSystem object + that are also used in HMS inventory data. These are properties + of a specific hardware instance that remain the same if the component + is relocated within the system. + + Note that Redfish ComputerSystem objects are an abstract type that + represents a system, but not necessarily a specific piece of hardware. + Chassis objects represent things like the physical enclosure. The + system links to chassis and also to subcomponents that have their + own object types like Processors, Memory, and Storage. + + That said, they are a close fit to how we represent nodes in HMS and + so it makes sense to pass through their properties since that is + how we will discover this information anyways. + properties: + AssetTag: + description: The administratively-assigned asset tag for this chassis. + type: string + readOnly: true + BiosVersion: + description: The version of the system BIOS or primary system firmware. + type: string + readOnly: true + Model: + description: Manufacturer-provided model number for part. + type: string + readOnly: true + Manufacturer: + description: Intended to provide the manufacturer of the part. + type: string + readOnly: true + PartNumber: + description: Manufacturer-provided part number for this component. + type: string + readOnly: true + SerialNumber: + description: Manufacturer-provided serial number for this component. + type: string + readOnly: true + SKU: + description: Manufacturer-provided SKU for this component. + type: string + SystemType: + description: Type of system. Probably always physical for now. 
+ enum: + - Physical + - Virtual + - OS + - PhysicallyPartitioned + - VirtuallyPartitioned + readOnly: true + type: string + UUID: + $ref: '#/definitions/UUID.1.0.0' + type: object + HWInventory.1.0.0_RedfishProcessorFRUInfo: + description: >- + These are pass-through properties of the Redfish Processor object type + that are also used in HMS inventory data. These are properties + of a specific processor instance that remain the same if it is + relocated within the system. + properties: + InstructionSet: + description: + The instruction set of the processor (Redfish pass-through) + enum: + - x86 + - x86-64 + - IA-64 + - ARM-A32 + - ARM-A64 + - MIPS32 + - MIPS64 + - OEM + readOnly: true + type: string + Manufacturer: + description: The processor manufacturer + readOnly: true + type: string + MaxSpeedMHz: + description: The maximum clock speed of the processor + readOnly: true + type: number + Model: + description: The product model number of this device + readOnly: true + type: string + ProcessorArchitecture: + description: The architecture of the processor + enum: + - x86 + - IA-64 + - ARM + - MIPS + - OEM + readOnly: true + type: string + ProcessorId: + description: >- + Identification information for this processor. Pass-through + from Redfish. + properties: + EffectiveFamily: + description: The effective Family for this processor + readOnly: true + type: string + EffectiveModel: + description: The effective Model for this processor + readOnly: true + type: string + IdentificationRegisters: + description: >- + The contents of the Identification Registers (CPUID) for this + processor + readOnly: true + type: string + MicrocodeInfo: + description: The Microcode Information for this processor + readOnly: true + type: string + Step: + description: The Step value for this processor + readOnly: true + type: string + VendorId: + description: The Vendor Identification for this processor + readOnly: true + type: string + type: object + ProcessorType: + description: The type of processor + enum: + - CPU + - GPU + - FPGA + - DSP + - Accelerator + - OEM + readOnly: true + type: string + TotalCores: + description: The total number of cores contained in this processor + readOnly: true + type: number + TotalThreads: + description: + The total number of execution threads supported by this processor + readOnly: true + type: number + type: object + HWInventory.1.0.0_RedfishDriveFRUInfo: + description: >- + These are pass-through properties of the Redfish Drive object type + that are also used in HMS inventory data. These are properties + of a specific drive instance that remain the same if it is + relocated within the system. + properties: + Manufacturer: + description: The drive manufacturer + readOnly: true + type: string + SerialNumber: + description: Unique identifier + readOnly: true + type: string + PartNumber: + description: Manufacturer part number + readOnly: true + type: string + Model: + description: Manufacturer model name + readOnly: true + type: string + SKU: + description: Manufacturer Stock Keeping Unit + readOnly: true + type: string + CapacityBytes: + description: The size, in bytes, of this drive + readOnly: true + type: number + Protocol: + description: The protocol that this drive currently uses to communicate to the storage controller.
+ enum: + - AHCI + - FC + - FCP + - FCoE + - FICON + - FTP + - GenZ + - HTTP + - HTTPS + - I2C + - MultiProtocol + - NFSv3 + - NFSv4 + - NVMe + - NVMeOverFabrics + - OEM + - PCIe + - RoCE + - RoCEv2 + - SAS + - SATA + - SFTP + - SMB + - TCP + - TFTP + - UDP + - UHCI + - USB + - iSCSI + - iWARP + readOnly: true + type: string + MediaType: + description: The type of media contained in this drive + enum: + - HDD + - SMR + - SSD + readOnly: true + type: string + RotationSpeedRPM: + description: The rotation speed of this drive, in revolutions per minute (RPM) + readOnly: true + type: number + BlockSizeBytes: + description: The size, in bytes, of the smallest addressable unit, or block + readOnly: true + type: integer + CapableSpeedGbs: + description: The speed, in gigabit per second (Gbit/s), at which this drive can communicate to a storage controller in ideal conditions. + readOnly: true + type: number + FailurePredicted: + description: An indication of whether this drive currently predicts a failure in the near future. + readOnly: true + type: boolean + EncryptionAbility: + description: The encryption ability of this drive. + enum: + - None + - Other + - SelfEncryptingDrive + readOnly: true + type: string + EncryptionStatus: + description: The status of the encryption of this drive. + enum: + - Foreign + - Locked + - Encrypted + - Unencrypted + - Unlocked + readOnly: true + type: string + NegotiatedSpeedGbs: + description: The speed, in gigabit per second (Gbit/s), at which this drive currently communicates to the storage controller. + readOnly: true + type: number + PredictedMediaLifeLeftPercent: + description: The percentage of reads and writes that are predicted to still be available for the media. + readOnly: true + type: number + type: object + HWInventory.1.0.0_RedfishMemoryFRUInfo: + description: >- + These are pass-through properties of the Redfish Memory object type + that are also used in HMS inventory data. These are properties + of a specific memory module that remain the same if the module is + relocated within the system. + properties: + BaseModuleType: + description: The base module type of Memory. + enum: + - RDIMM + - UDIMM + - SO_DIMM + - LRDIMM + - Mini_RDIMM + - Mini_UDIMM + - SO_RDIMM_72b + - SO_UDIMM_72b + - SO_DIMM_16b + - SO_DIMM_32b + readOnly: true + type: string + BusWidthBits: + description: Bus width in bits. + readOnly: true + type: number + CapacityMiB: + description: Memory Capacity in MiB. + readOnly: true + type: number + DataWidthBits: + description: Data width in bits. + readOnly: true + type: number + ErrorCorrection: + description: >- + Whether single or multiple errors, or address parity errors can be + corrected. + enum: + - NoECC + - SingleBitECC + - MultiBitECC + - AddressParity + readOnly: true + type: string + Manufacturer: + description: The manufacturer of the memory module + readOnly: true + type: string + MemoryType: + description: Type of memory module. + enum: + - DRAM + - NVDIMM_N + - NVDIMM_F + - NVDIMM_P + readOnly: true + type: string + MemoryDeviceType: + description: Type details of the memory. + enum: + - DDR + - DDR2 + - DDR3 + - DDR4 + - DDR4_SDRAM + - DDR4E_SDRAM + - LPDDR4_SDRAM + - DDR3_SDRAM + - LPDDR3_SDRAM + - DDR2_SDRAM + - DDR2_SDRAM_FB_DIMM + - DDR2_SDRAM_FB_DIMM_PROBE + - DDR_SGRAM + - DDR_SDRAM + - ROM + - SDRAM + - EDO + - FastPageMode + - PipelinedNibble + readOnly: true + type: string + OperatingSpeedMhz: + description: Operating speed of Memory in MHz.
+ readOnly: true + type: number + PartNumber: + description: Manufacturer-provided part number for this component. + type: string + readOnly: true + RankCount: + description: Number of ranks available in the memory. + minimum: 0 + readOnly: true + type: number + SerialNumber: + description: Manufacturer-provided serial number for this component. + type: string + readOnly: true + type: object + HWInventory.1.0.0_RedfishPDUFRUInfo: + description: >- + These are pass-through properties of the Redfish PowerDistribution type + that are also used in HMS inventory data when this is the underlying + Redfish object type for a particular HMS component type. These are + properties of a specific hardware instance/FRU that remain the same if + the component is relocated within the system. + properties: + AssetTag: + description: The administratively-assigned asset tag for this chassis. + type: string + readOnly: true + DateOfManufacture: + description: Manufacturer-provided date-of-manufacture for part. + type: string + readOnly: true + EquipmentType: + description: >- + This property indicates the type of PowerDistribution equipment + (from Redfish - not all of these will likely appear + in practice. In any case, the HMS type and subtype will + identify the hardware type, this is for informational purposes + only). + enum: + - RackPDU + - FloorPDU + - ManualTransferSwitch + - AutomaticTransferSwitch + - Other + readOnly: true + type: string + FirmwareVersion: + description: Firmware version at time of discovery. + type: string + readOnly: true + HardwareRevision: + description: Manufacturer-provided HardwareRevision for part. + type: string + readOnly: true + Model: + description: Manufacturer-provided model number for part. + type: string + readOnly: true + Manufacturer: + description: Intended to provide the manufacturer of the part. + type: string + readOnly: true + PartNumber: + description: Manufacturer-provided part number for this component. + type: string + readOnly: true + SerialNumber: + description: Manufacturer-provided serial number for this component. + type: string + readOnly: true + SKU: + description: Manufacturer-provided SKU for this component. + type: string + readOnly: true + CircuitSummary: + description: Summary of circuits for PDU. + properties: + MonitoredOutlets: + description: Number of monitored outlets + readOnly: true + type: number + TotalPhases: + description: Number of phases in total + readOnly: true + type: number + ControlledOutlets: + description: Total number of controlled outlets + readOnly: true + type: number + TotalOutlets: + description: Total number of outlets + readOnly: true + type: number + MonitoredBranches: + description: Number of monitored branches + readOnly: true + type: number + MonitoredPhases: + description: Number of monitored phases + readOnly: true + type: number + TotalBranches: + description: Number of total branches. + readOnly: true + type: number + type: object + readOnly: true + type: object + HWInventory.1.0.0_RedfishOutletFRUInfo: + description: >- + These are pass-through properties of the Redfish Outlet type + that are also used in HMS inventory data when this is the underlying + Redfish object type for a particular HMS component type. These are the + properties of a specific hardware instance/FRU that remain the same if + the component is relocated within the system. Child of a PDU. + properties: + VoltageType: + description: Type of voltage + enum: + - AC + - DC + readOnly: true + type: string + NominalVoltage: + description: Nominal voltage for outlet.
+ readOnly: true + type: string + PowerEnabled: + description: Indicates if the outlet can be powered. + type: boolean + example: true + RatedCurrentAmps: + description: Rated current in amps + readOnly: true + type: number + OutletType: + description: Type of outlet. + readOnly: true + type: string + PhaseWiringType: + description: Phase wiring type + enum: + - OnePhase3Wire + - TwoPhase3Wire + - TwoPhase4Wire + - ThreePhase4Wire + - ThreePhase5Wire + readOnly: true + type: string + type: object + HWInventory.1.0.0_RedfishCMMRectifierFRUInfo: + description: >- + These are pass-through properties of the Redfish PowerSupply type + that are also used in HMS inventory data when this is the underlying + Redfish object type for a particular HMS component type. These are the + properties of a specific hardware instance/FRU that remain the same if + the component is relocated within the system. Child of a Chassis. + properties: + Manufacturer: + description: The manufacturer of this power supply. + readOnly: true + type: string + SerialNumber: + description: The serial number for this power supply. + readOnly: true + type: string + Model: + description: The model number for this power supply. + readOnly: true + type: string + PartNumber: + description: The part number for this power supply. + readOnly: true + type: string + PowerCapacityWatts: + description: The maximum capacity of this power supply. + readOnly: true + type: number + PowerInputWatts: + description: The measured input power of this power supply. + readOnly: true + type: number + PowerOutputWatts: + description: The measured output power of this power supply. + readOnly: true + type: number + PowerSupplyType: + description: The power supply type (AC or DC). + readOnly: true + type: string + type: object + HWInventory.1.0.0_RedfishNodeAccelRiserFRUInfo: + description: >- + These are the properties of the NodeAccelRiser type + that are passed-through to the HMS inventory data when the underlying Redfish object + type is an Assembly with a PhysicalContext of GPUSubsystem. These are the + properties of a specific hardware instance/FRU that remain the same if + the component is relocated within the system. Child of a Chassis. + properties: + Producer: + description: The manufacturer of this riser card. + readOnly: true + type: string + SerialNumber: + description: The serial number for this riser card. + readOnly: true + type: string + Model: + description: The model number for this riser card. + readOnly: true + type: string + PartNumber: + description: The part number for this riser card. + readOnly: true + type: string + ProductionDate: + description: The date of production of this riser card. + readOnly: true + type: string + Version: + description: The version of this riser card. + readOnly: true + type: string + EngineeringChangeLevel: + description: The engineering change level of this riser card. + readOnly: true + type: string + PhysicalContext: + description: The hardware type of this riser card. + readOnly: true + type: string + type: object + HWInventory.1.0.0_RedfishNodeEnclosurePowerSupplyFRUInfo: + description: >- + These are pass-through properties of the Redfish PowerSupply type + that are also used in HMS inventory data when this is the underlying + Redfish object type for a particular HMS component type. These are the + properties of a specific hardware instance/FRU that remain the same if + the component is relocated within the system. Child of a Chassis. 
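+  # Illustrative only, not part of the schema: a payload for this FRU info
+  # object might look like the following (hypothetical values).
+  #   {
+  #     "Manufacturer": "ExampleVendor",
+  #     "SerialNumber": "PSU-000123",
+  #     "PowerCapacityWatts": 2200,
+  #     "PowerSupplyType": "AC"
+  #   }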
+ properties: + Manufacturer: + description: The manufacturer of this power supply. + readOnly: true + type: string + SerialNumber: + description: The serial number for this power supply. + readOnly: true + type: string + Model: + description: The model number for this power supply. + readOnly: true + type: string + PartNumber: + description: The part number for this power supply. + readOnly: true + type: string + PowerCapacityWatts: + description: The maximum capacity of this power supply. + readOnly: true + type: number + PowerInputWatts: + description: The measured input power of this power supply. + readOnly: true + type: number + PowerOutputWatts: + description: The measured output power of this power supply. + readOnly: true + type: number + PowerSupplyType: + description: The power supply type (AC or DC). + readOnly: true + type: string + type: object + HWInventory.1.0.0_RedfishManagerFRUInfo: + description: >- + These are pass-through properties of the Redfish Manager type + that are also used in HMS inventory data when this is the underlying + Redfish object type for a particular HMS component type. These are the + properties of a specific hardware instance/FRU that remain the same if + the component is relocated within the system. + properties: + ManagerType: + description: The type of manager that this Resource represents, i.e. BMC, EnclosureManager, RackManager, etc. + readOnly: true + type: string + Manufacturer: + description: The manufacturer of this manager. + readOnly: true + type: string + SerialNumber: + description: The serial number for this manager. + readOnly: true + type: string + Model: + description: The model number for this manager. + readOnly: true + type: string + PartNumber: + description: The part number for this manager. + readOnly: true + type: string + type: object + HWInventory.1.0.0_HSNNICFRUInfo: + description: >- + These are pass-through properties of the Node HSN NIC type + that are also used in HMS inventory data when this is the underlying + network object type for a particular HMS component type. These are the + properties of a specific hardware instance/FRU that remain the same if + the component is relocated within the system. + properties: + Manufacturer: + description: The manufacturer of this HSN NIC. + type: string + Model: + description: The model of this HSN NIC. + type: string + PartNumber: + description: The part number for this HSN NIC. + type: string + SKU: + description: The SKU for this HSN NIC. + type: string + SerialNumber: + description: The serial number for this HSN NIC. + type: string + type: object + # + # Hardware Inventory History - This is the historical data for each FRU + # and location tracked by HSM. + # + HWInventory.1.0.0_HWInventoryHistoryCollection: + description: >- + This is the array of sorted history entries (by FRU or by location). + properties: + Components: + type: array + items: + $ref: '#/definitions/HWInventory.1.0.0_HWInventoryHistoryArray' + type: object + HWInventory.1.0.0_HWInventoryHistoryArray: + description: >- + This is the array of history entries for a particular FRU or component location (xname). + properties: + ID: + type: string + description: >- + Locational xname or FRU ID of the component associated with the history entries in + the 'History' array. + History: + type: array + items: + $ref: '#/definitions/HWInventory.1.0.0_HWInventoryHistory' + type: object + HWInventory.1.0.0_HWInventoryHistory: + description: >- + This is a HWInventory history entry. 
Each time a HWInventory event happens, a history + record is created with associated data including locational xname, FRU ID, timestamp, + and event type (Added, Removed, Scanned, etc). + properties: + ID: + description: >- + Uniquely identifies the component by its physical location (xname). + $ref: '#/definitions/XName.1.0.0' + FRUID: + # The FRU identifier + $ref: '#/definitions/FRUId.1.0.0' + Timestamp: + description: The time that the history entry was created. + format: date-time + type: string + example: '2018-08-09 03:55:57.000000' + EventType: + description: Describes the type of event the history entry was created for. + enum: + - Added + - Removed + - Scanned + type: string + example: Added + type: object + ######################################################################### + # + # RedfishEndpoint data structures - Represents component running + # Redfish service entry point. + # + ######################################################################### + RedfishEndpoint.1.0.0_RedfishEndpoint: + description: >- + This describes a RedfishEndpoint that is interrogated in order to + perform discovery of the components below it. It is a BMC or + card/blade controller or other device that operates a Redfish + entry point through which the components underneath it may be + discovered and managed. + properties: + ID: + $ref: '#/definitions/XNameRFEndpoint.1.0.0' + Type: + # HMS Logical component type e.g. NodeBMC, ChassisBMC. + # This may be left blank and will be detected from the ID/xname + # and/or by the discovery process. + $ref: '#/definitions/HMSType.1.0.0' + Name: + description: >- + This is an arbitrary, user-provided name for the endpoint. It can + describe anything that is not captured by the ID/xname. + type: string + Hostname: + description: >- + Hostname of the endpoint's FQDN, will always be the host portion of + the fully-qualified domain name. + Note that the hostname should normally always be the same as the ID + field (i.e. xname) of the endpoint. + type: string + Domain: + description: >- + Domain of the endpoint's FQDN. Will always match remaining + non-hostname portion of fully-qualified domain name (FQDN). + type: string + FQDN: + description: >- + Fully-qualified domain name of RF endpoint on management network. + This is not writable because it is made up of the Hostname and + Domain. + type: string + Enabled: + description: >- + To disable a component without deleting its data from the database, + this can be set to false. + type: boolean + example: true + UUID: + # UUID of Redfish service root + $ref: '#/definitions/UUID.1.0.0' + User: + description: Username to use when interrogating endpoint + type: string + Password: + description: >- + Password to use when interrogating endpoint, normally + suppressed in output. + type: string + UseSSDP: + description: Whether to use SSDP for discovery if the EP supports it. + type: boolean + MacRequired: + description: >- + Whether the MAC must be used (e.g. in River) in setting up + geolocation info so the endpoint's location in the system can be + determined. The MAC does not need to be provided when creating the + endpoint if the endpoint type can arrive at a geolocated hostname + on its own. + type: boolean + MACAddr: + description: >- + This is the MAC of the Redfish Endpoint on the + management network, i.e. corresponding to the FQDN field's + Ethernet interface where the root service is running. + Not the HSN MAC. + This is a MAC address in the standard colon-separated 12-digit hex + format.
+ pattern: '^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$' + type: string + example: ae:12:e2:ff:89:9d + IPAddress: + description: >- + This is the IP of the Redfish Endpoint on the + management network, i.e. corresponding to the FQDN field's + Ethernet interface where the root service is running. This may be + IPv4 or IPv6 + type: string + example: 10.254.2.10 + RediscoverOnUpdate: + description: Trigger a rediscovery when endpoint info is updated. + type: boolean + TemplateID: + description: >- + Links to a discovery template defining how the endpoint should + be discovered. + type: string + DiscoveryInfo: + description: >- + Contains info about the discovery status of the given endpoint. + properties: + LastDiscoveryAttempt: + description: The time the last discovery attempt took place. + format: date-time + readOnly: true + type: string + LastDiscoveryStatus: + description: Describes the outcome of the last discovery attempt. + enum: + - EndpointInvalid + - EPResponseFailedDecode + - HTTPsGetFailed + - NotYetQueried + - VerificationFailed + - ChildVerificationFailed + - DiscoverOK + type: string + readOnly: true + RedfishVersion: + description: Version of Redfish as reported by the RF service root. + type: string + readOnly: true + type: object + readOnly: true + # ComponentEndpoints: + # items: + # $ref: '#/definitions/ComponentEndpoint.1.0.0_ComponentEndpoint' + # type: array + # readOnly: true + # ServiceEndpoints: + # items: + # $ref: '#/definitions/ServiceEndpoint.1.0.0_ServiceEndpoint' + # type: array + # readOnly: true + type: object + required: + - ID + RedfishEndpoint.1.0.0_ResourceURICollection: + properties: + Name: + description: >- + Should describe the collection, though the type of resources + the links correspond to should also be inferred from the context + in which the collection was obtained. + type: string + readOnly: true + example: (Type of Object) Collection + Members: + description: An array of ResourceIds. + items: + $ref: '#/definitions/ResourceURI.1.0.0' + type: array + readOnly: true + MemberCount: + description: Number of ResourceURIs in the collection + type: number + format: int32 + readOnly: true + type: object + # + # RedfishEndpointArray - Used for queries returning 0->n RedfishEndpoints + # + RedfishEndpointArray_RedfishEndpointArray: + description: >- + This is a collection of RedfishEndpoint objects returned whenever a + query is expected to result in 0 to n matches. + properties: + RedfishEndpoints: + description: Contains the HMS RedfishEndpoint objects in the array. + items: + $ref: '#/definitions/RedfishEndpoint.1.0.0_RedfishEndpoint' + type: array + type: object + # + # RedfishEndpoint POST query bodies + # + RedfishEndpointArray_PostQuery: + description: >- + There are limits to the length of an HTTP URL and query string. + Hence, if we wish to query an arbitrary list of XName/IDs, it + will need to be in the body of the request. This object is + used for this purpose. It is similar to the analogous GET operation. + properties: + RedfishEndpointIDs: + description: >- + An array of XName/ID values for the RedfishEndpoints to query. 
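+  # Illustrative only, not part of the schema: a request body for this POST
+  # query might look like the following (hypothetical xnames/partition).
+  #   { "RedfishEndpointIDs": [ "x0c0s0b0", "x0c0s1b0" ], "partition": "p1" }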
+ items: + $ref: '#/definitions/XNameForQuery.1.0.0' + type: array + partition: + $ref: '#/definitions/XNamePartition.1.0.0' + required: + - RedfishEndpointIDs + type: object + ######################################################################### + # + # ServiceEndpoint - Captures discovered data about service running on a + # particular RedfishEndpoint + # + ######################################################################### + ServiceEndpoint.1.0.0_ServiceEndpoint: + description: >- + This describes a service running on a Redfish endpoint and is populated + when Redfish endpoint discovery occurs. It is used by clients who + need to interact directly with the service via Redfish. + + There are also ComponentEndpoints, which represent Redfish components of + a physical type (i.e., we track their state as components), which are + also discovered when the Redfish Endpoint is discovered. + + The RedfishEndpointID is just the ID of the parent Redfish endpoint. + As there are many service types per endpoint, the RedfishType + must also be included to get a unique entry for a service. Services + do not have their own xnames, and so they are identified by the + combination of the RedfishEndpointID they are running on, plus + the RedfishType value (e.g. AccountService, TaskService, etc.). + + NOTE: These records are discovered, not created, and therefore are not + writable (since any changes would be overwritten by a subsequent + discovery). + properties: + RedfishEndpointID: + # Unique identifier for the parent RedfishEndpoint by its location + # i.e. xname. This is essentially a back-reference to the RF endpoint + # that was used to discover this service. + $ref: '#/definitions/XNameRFEndpoint.1.0.0' + RedfishType: + # This is the Redfish service type, not to be confused with the HMS + # component type. In this case, may be AccountService, TaskService, + # or so on. + $ref: '#/definitions/RedfishType.1.0.0' + RedfishSubtype: + # This may not be relevant to RF services like it is for components, + # so it is likely to not be present or empty. + $ref: '#/definitions/RedfishSubtype.1.0.0' + UUID: + $ref: '#/definitions/UUID.1.0.0' + OdataID: + # This is the relative path to the component relative to the parent + # RedfishEndpoint's service root. + $ref: '#/definitions/OdataID.1.0.0' + RedfishEndpointFQDN: + description: >- + This is a back-reference to the fully-qualified domain name of the + parent Redfish endpoint that was used to discover the component. It + is the RedfishEndpointID field i.e. the hostname/xname plus its + current domain. + type: string + readOnly: true + RedfishURL: + description: >- + This is the complete URL to the corresponding Redfish object, + combining the RedfishEndpoint's FQDN and the OdataID. + type: string + readOnly: true + ServiceInfo: + # This is any additional information for the service. + $ref: '#/definitions/ServiceEndpoint.1.0.0_ServiceInfo' + type: object + ServiceEndpoint.1.0.0_ServiceInfo: + description: >- + This is any additional information for the service. This is service + specific. Schema for Redfish services can be found at + https://redfish.dmtf.org/redfish/schema_index + properties: + Name: + description: The name of the service. + type: string + readOnly: true + type: object + ServiceEndpointArray_ServiceEndpointArray: + description: >- + This is a collection of ServiceEndpoint objects returned whenever a + query is expected to result in 0 to n matches.
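+  # Illustrative only, not part of the schema: one ServiceEndpoint entry in
+  # this array might look like the following (hypothetical values). Note how
+  # RedfishURL is the RedfishEndpointFQDN joined with the OdataID.
+  #   {
+  #     "RedfishEndpointID": "x0c0s0b0",
+  #     "RedfishType": "AccountService",
+  #     "OdataID": "/redfish/v1/AccountService",
+  #     "RedfishEndpointFQDN": "x0c0s0b0.example.com",
+  #     "RedfishURL": "x0c0s0b0.example.com/redfish/v1/AccountService"
+  #   }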
+ properties: + ServiceEndpoints: + description: Contains the HMS ServiceEndpoint objects in the array. + items: + $ref: '#/definitions/ServiceEndpoint.1.0.0_ServiceEndpoint' + type: array + type: object + ######################################################################### + # + # CompEthInterface - Captures discovered data about component Ethernet + # interfaces on a particular ComponentEndpoint + # + ######################################################################### + CompEthInterface.1.0.0: + description: >- + A component Ethernet interface is an object describing a relation between + a MAC address and IP address for components. + properties: + ID: + description: >- + The ID of the component Ethernet interface. + type: string + readOnly: true + example: a4bf012b7310 + Description: + description: >- + An optional description for the component Ethernet interface. + type: string + MACAddress: + description: >- + The MAC address of this component Ethernet interface + type: string + IPAddresses: + description: >- + The IP addresses associated with the MAC address for this component Ethernet interface. + type: array + items: + $ref: '#/definitions/CompEthInterface.1.0.0_IPAddressMapping' + LastUpdate: + description: >- + A timestamp for when the component Ethernet interface was last modified. + format: date-time + type: string + readOnly: true + example: '2020-05-13T19:18:45.524974Z' + ComponentID: + description: >- + The xname of the component with this Ethernet interface. May be blank if + the component has not been discovered yet. + $ref: '#/definitions/XNameRW.1.0.0' + Type: + description: >- + HMS component type of the component with this Ethernet interface. May be blank if + the component has not been discovered yet. + $ref: '#/definitions/HMSType.1.0.0' + type: object + required: + - MACAddress + CompEthInterface.1.0.0_Patch: + description: >- + To update the IP addresses, CompID, and/or description fields of a component Ethernet interface, + a PATCH operation can be used. Omitted fields are not updated. + NOTE: Updating the IP addresses field updates the LastUpdate field. + properties: + Description: + description: >- + An optional description for the component Ethernet interface. + type: string + IPAddresses: + description: >- + The IP addresses associated with the MAC address for this component Ethernet interface. + type: array + items: + $ref: '#/definitions/CompEthInterface.1.0.0_IPAddressMapping' + ComponentID: + description: >- + The xname of the component with this Ethernet interface. + $ref: '#/definitions/XNameRW.1.0.0' + type: object + CompEthInterface.1.0.0_IPAddressMapping: + description: >- + An IP address mapping maps an IP address to a network. In a component Ethernet interface, it is used to describe the IP addresses and networks that are associated with it. + properties: + IPAddress: + description: >- + The IP address associated with the MAC address for this component Ethernet interface on this particular network. + type: string + example: 10.252.0.1 + Network: + type: string + description: >- + The network that this IP address is associated with. + example: HMN + type: object + required: + - IPAddress + CompEthInterface.1.0.0_IPAddressMapping_Patch: + description: >- + To update the network field of an IP address mapping in a component + Ethernet interface, a PATCH operation can be used. Omitted fields are not updated. + properties: + Network: + type: string + description: >- + The network that this IP address is associated with.
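+  # Illustrative only, not part of the schema: a PATCH body for this object
+  # might look like the following (hypothetical value).
+  #   { "Network": "HMN" }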
+ type: object + ########################################################################### + # + # Discover payload and DiscoveryStatus object definitions + # + ########################################################################### + DiscoveryStatus.1.0.0_DiscoveryStatus: + description: >- + Returns info on the current status of a discovery operation with the + given ID returned when a Discover action is requested. + properties: + ID: + description: The ID number of the discover operation. + type: number + format: int32 + readOnly: true + example: 0 + Status: + description: Describes the status of the given Discover operation. + enum: + - NotStarted + - Pending + - InProgress + - Complete + type: string + readOnly: true + example: Complete + LastUpdateTime: + description: The time that the Status field was last updated. + readOnly: true + format: date-time + type: string + example: '2018-08-09 03:55:57.000000' + Details: + $ref: '#/definitions/DiscoveryStatus.1.0.0_Details' + type: object + DiscoveryStatus.1.0.0_Details: + description: >- + Details accompanying a DiscoveryStatus entry. Optional. + Reserved for future use. + type: object + example: null + Discover.1.0.0_DiscoverInput: + description: >- + The POST body for a Discover operation. Note that these fields are + optional. The default for the xnames field is to select all + RedfishEndpoints. The default for force is false. + properties: + xnames: + description: >- + An array of XName/ID values for the RedfishEndpoints to discover. + If zero-length or omitted, all RedfishEndpoints will be discovered. + items: + $ref: '#/definitions/XNameRFEndpoint.1.0.0' + type: array + force: + description: >- + Whether to force discovery if there is already a conflicting + DiscoveryStatus entry that is either Pending or InProgress. + default is false. + type: boolean + example: false + type: object + ########################################################################### + # + # System Information Block (SIB) object definitions + # + ########################################################################### + # SystemInformationBlock.1.0.0_SystemInformationBlock: + # description: >- + # The SystemInformationBlock (SIB) nests Component, HWInventory, HSNType, + # and HSNInfo subschemas. + # This provides information + # that may be needed by a large number of endpoints in a single, + # self-contained object. While the all of the individual sections, + # (except for the HSNInfo, which is obtained from the fabric manager) + # can be queried using several of the more general APIs for these + # structures, the SIB supports the use of a single API that clients may + # use to bootstrap their initial system state, for example. + # properties: + # XName: + # description: The component xname used to query the SIB + # type: string + # example: s0 + # ItemsQueried: + # description: >- + # The subsections of the SIB requested in the query that + # produced this SIB. This is given by the 'items' query + # parameter or POST body property. 
+ # properties: + # NodeList: + # type: boolean + # HWInventory: + # type: boolean + # HSNType: + # type: boolean + # HSNInfo: + # type: boolean + # type: object + # SIBItems: + # properties: + # NodeList: + # $ref: '#/definitions/ComponentArray_ComponentArray' + # HSNType: + # $ref: '#/definitions/NetType.1.0.0' + # HWInventory: + # $ref: '#/definitions/HWInventory.1.0.0_HWInventory' + # HSNInfo: + # $ref: '#/definitions/HSNInfo.1.0.0' + # type: object + # readOnly: true + # type: object + # readOnly: true + # # + # # Queries via POST for SIB data + # # + # SystemInformationBlock.1.0.0_PostQuery: + # description: >- + # A POST query that can be used to select the SIB block instead of + # a get. + # properties: + # XName: + # description: The component xname used to query the SIB + # type: string + # example: s0 + # UseMsgBus: + # description: Deliver over message bus. Default false. + # type: boolean + # Items: + # description: >- + # The subsections of the SIB to be requested in the query that + # produced this SIB. These will be reflected in the ItemsQueried + # field in the response. This, plus the xname can be used to match up + # a response with the query parameters. + # properties: + # NodeList: + # type: boolean + # HWInventory: + # type: boolean + # HSNType: + # type: boolean + # HSNInfo: + # type: boolean + # type: object + # type: object + # + # SCN Subscriptions + # + Subscriptions_SCNPostSubscription: + type: object + description: >- + This is the JSON payload that contains information to create a new + state change notification subscription + properties: + Subscriber: + description: This is the name of the subscriber. + type: string + example: 'scnfd@sms02.cray.com' + Enabled: + description: >- + This value toggles subscriptions to state change notifications + concerning components being disabled or enabled. 'true' will cause + the subscriber to be notified about components being enabled or + disabled. 'false' or empty will result in no such notifications. + type: boolean + example: true + Roles: + description: >- + This is an array containing component roles for which to be notified + when role changes occur. + type: array + items: + $ref: '#/definitions/HMSRole.1.0.0' + SubRoles: + description: >- + This is an array containing component subroles for which to be notified + when subrole changes occur. + type: array + items: + $ref: '#/definitions/HMSSubRole.1.0.0' + SoftwareStatus: + description: >- + This is an array containing component software statuses for which + to be notified when software status changes occur. + type: array + items: + type: string + description: >- + SoftwareStatus of the node, used by the managed plane for running + nodes. + States: + description: >- + This is an array containing component states for which to be notified + when state changes occur. + type: array + items: + $ref: '#/definitions/HMSState.1.0.0' + Url: + $ref: '#/definitions/Subscriptions_Url' + Subscriptions_SCNPatchSubscription: + type: object + description: >- + This is the JSON payload that contains state change notification + subscription information. + properties: + Op: + description: >- + The type of operation to be performed on the subscription + enum: + - add + - remove + - replace + type: string + example: "add" + Enabled: + description: >- + This value toggles subscriptions to state change notifications + concerning components being disabled or enabled. 'true' will cause + the subscriber to be notified about components being enabled or + disabled. 
'false' or empty will result in no such notifications. + type: boolean + example: true + Roles: + description: >- + This is an array containing component roles for which to be notified + when role changes occur. + type: array + items: + $ref: '#/definitions/HMSRole.1.0.0' + SubRoles: + description: >- + This is an array containing component subroles for which to be notified + when subrole changes occur. + type: array + items: + $ref: '#/definitions/HMSSubRole.1.0.0' + SoftwareStatus: + description: >- + This is an array containing component software statuses for which to + be notified when software status changes occur. + type: array + items: + type: string + description: >- + SoftwareStatus of the node, used by the managed plane for running + nodes. + States: + description: >- + This is an array containing component states for which to be notified + when state changes occur. + type: array + items: + $ref: '#/definitions/HMSState.1.0.0' + Subscriptions_SCNSubscriptionArrayItem.1.0.0: + description: 'State change notification subscription JSON payload.' + properties: + ID: + $ref: '#/definitions/Subscription_ID' + Subscriber: + type: string + example: 'scnfd@sms02.cray.com' + Enabled: + description: >- + This value toggles subscriptions to state change notifications + concerning components being disabled or enabled. 'true' will cause + the subscriber to be notified about components being enabled or + disabled. 'false' or empty will result in no such notifications. + type: boolean + example: true + Roles: + description: >- + This is an array containing component roles for which to be notified + when role changes occur. + type: array + items: + $ref: '#/definitions/HMSRole.1.0.0' + SubRoles: + description: >- + This is an array containing component subroles for which to be notified + when subrole changes occur. + type: array + items: + $ref: '#/definitions/HMSSubRole.1.0.0' + SoftwareStatus: + description: >- + This is an array containing component software statuses for which + to be notified when software status changes occur. + type: array + items: + type: string + description: >- + SoftwareStatus of the node, used by the managed plane for running + nodes. + States: + description: >- + This is an array containing component states for which to be notified + when state changes occur. + type: array + items: + $ref: '#/definitions/HMSState.1.0.0' + Url: + $ref: '#/definitions/Subscriptions_Url' + Subscriptions_SCNSubscriptionArray: + description: + List of all currently held state change notification subscriptions. + properties: + SubscriptionList: + type: array + items: + $ref: '#/definitions/Subscriptions_SCNSubscriptionArrayItem.1.0.0' + Subscriptions_Url: + description: 'URL to send notifications to' + type: string + example: 'https://sms02.cray.com:27000/scnfd/v1/scn' + Subscription_ID: + description: >- + This is the ID associated with the subscription that was generated at + its creation. + type: string + example: '42' + Group.1.0.0: + description: >- + A group is an informal, possibly overlapping division of the system + that groups components under an administratively chosen label + (i.e. group name). + Unlike partitions, components can be members of any number of groups. + properties: + label: + description: >- + The label is a human-readable identifier for the group and uniquely + identifies it. + $ref: '#/definitions/ResourceName' # String with format [a-z0-9_-.]+ + description: + description: >- + A one-line, user-provided description of the group. 
+          type: string
+        tags:
+          description:
+            A free-form array of strings to provide extra organization/filtering.
+            Not to be confused with labels/groups.
+          type: array
+          items:
+            $ref: '#/definitions/ResourceName' # String with format [a-z0-9_-.]+
+        exclusiveGroup:
+          description: >-
+            If present and non-empty, the group is not allowed to add a member
+            that exists under a different group/label where the exclusiveGroup
+            field is the same. This can be used to create groups of groups
+            where a component may only be present in one of the set.
+          $ref: '#/definitions/ResourceName' # String with format [a-z0-9_-.]+
+        members:
+          description: >-
+            The members are a fully enumerated (i.e. no implied members besides
+            those explicitly provided) representation of the components in the
+            group
+          $ref: '#/definitions/Members.1.0.0'
+    type: object
+    required:
+      - label
+    example:
+      label: blue
+      description: This is the blue group
+      tags:
+        - optional_tag1
+        - optional_tag2
+      exclusiveGroup: optional_excl_group
+      members:
+        ids:
+          - x1c0s1b0n0
+          - x1c0s1b0n1
+          - x1c0s2b0n0
+          - x1c0s2b0n1
+  Group.1.0.0_Patch:
+    description: >-
+      To update the tags array and/or description, a PATCH operation can be
+      used. If either field is omitted, it will not be updated.
+      NOTE: This cannot be used to completely replace the members list.
+      Rather, individual members can be removed or added with the POST/DELETE
+      /members API.
+    properties:
+      description:
+        description: >-
+          A one-line, user-provided description of the group.
+        type: string
+      tags:
+        description:
+          A free-form array of strings to provide extra organization/filtering.
+          Not to be confused with labels/groups.
+        type: array
+        items:
+          $ref: '#/definitions/ResourceName' # String with format [a-z0-9_-.]+
+    type: object
+    example:
+      description: This is an updated group description
+      tags:
+        - new_tag
+        - existing_tag
+  Partition.1.0.0:
+    description: >-
+      A partition is a formal, non-overlapping division of the system that
+      forms an administratively distinct sub-system e.g. for implementing
+      multi-tenancy.
+    properties:
+      name:
+        description: >-
+          The name is a human-readable identifier for the partition and
+          uniquely identifies it.
+        $ref: '#/definitions/ResourceName' # String with format [a-z0-9_-.]+
+      description:
+        description: >-
+          A one-line, user-provided description of the partition.
+        type: string
+      tags:
+        description:
+          A free-form array of strings to provide extra organization/filtering.
+          Not to be confused with labels/groups.
+        type: array
+        items:
+          $ref: '#/definitions/ResourceName' # String with format [a-z0-9_-.]+
+      members:
+        description: >-
+          The members are a fully enumerated (i.e. no implied members besides
+          those explicitly provided) representation of the components in the
+          partition
+        $ref: '#/definitions/Members.1.0.0'
+    required:
+      - name
+    example:
+      name: p1
+      description: This is partition 1
+      tags:
+        - optional_tag_a
+        - optional_tag1
+      members:
+        ids:
+          - x1c0s1b0n0
+          - x1c0s1b0n1
+          - x2c0s3b0n0
+          - x2c0s3b0n1
+  Partition.1.0.0_Patch:
+    description: >-
+      To update the tags array and/or description, a PATCH operation can be
+      used. If either field is omitted, it will not be updated.
+      NOTE: This cannot be used to completely replace the members list.
+      Rather, individual members can be removed or added with the POST/DELETE
+      /members API.
+    properties:
+      description:
+        description: >-
+          A one-line, user-provided description of the partition.
+        type: string
+      tags:
+        description:
+          A free-form array of strings to provide extra organization/filtering.
+          Not to be confused with labels/groups.
+        type: array
+        items:
+          $ref: '#/definitions/ResourceName' # String with format [a-z0-9_-.]+
+    type: object
+    example:
+      description: This is an updated partition description
+      tags:
+        - new_tag
+        - existing_tag
+  Members.1.0.0:
+    description: >-
+      The members are a fully enumerated (i.e. no implied members besides
+      those explicitly provided) representation of the components in a
+      partition or group
+    properties:
+      ids:
+        description: >-
+          Set of Component XName IDs that represent the membership of the
+          group or partition.
+        type: array
+        items:
+          $ref: '#/definitions/XNameRW.1.0.0' # String with XName format
+    type: object
+    example:
+      ids:
+        - x1c0s1b0n0
+        - x1c0s1b0n1
+        - x2c0s3b0n0
+        - x2c0s3b0n1
+  MemberID:
+    description: >-
+      This is used when creating a new entry in a Group or Partition
+      members array. It is the xname ID of the new member.
+    properties:
+      id:
+        # Uniquely identifies the component by its physical location (xname)
+        $ref: '#/definitions/XNameRW.1.0.0'
+    type: object
+  Membership.1.0.0:
+    description: >-
+      A membership is a mapping of a component xname to its set of group
+      labels and partition names.
+    properties:
+      id:
+        # Uniquely identifies the component by its physical location (xname)
+        $ref: '#/definitions/XName.1.0.0'
+      partitionName:
+        description: >-
+          The name is a human-readable identifier for the partition and
+          uniquely identifies it.
+        type: string
+      groupLabels:
+        description: >-
+          An array with all group labels the component is associated with.
+          The label is the human-readable identifier for a group and uniquely
+          identifies it.
+        type: array
+        items:
+          type: string
+    type: object
+    example:
+      id: x0c0s22b0n0
+      nid: 45
+      partitionName: p1
+      groupLabels:
+        - group1
+        - group2
+
+  ##########################################################################
+  #
+  # Locking v1 structures.
+  #
+  ##########################################################################
+  Lock.1.0.0:
+    description: >-
+      A lock is an object describing a temporary reservation of a set of
+      components held by an external service. If not removed by the external
+      service, HSM will automatically remove the lock after its lifetime has
+      expired.
+    properties:
+      id:
+        description: >-
+          The ID number of the lock.
+        type: string
+        readOnly: true
+        example: bf9362ad-b29c-40ed-9881-18a5dba3a26b
+      created:
+        description: >-
+          A timestamp for when the lock was created.
+        format: date-time
+        type: string
+        readOnly: true
+        example: '2019-09-12 03:55:57.000000'
+      reason:
+        description: >-
+          A one-line, user-provided reason for the lock.
+        type: string
+        example: For firmware update
+      owner:
+        description: >-
+          A user-provided self identifier for the lock
+        type: string
+        example: FUS
+      lifetime:
+        description:
+          The length of time in seconds the component lock should exist before
+          it is automatically deleted by HSM.
+        type: integer
+        example: 90
+      xnames:
+        description: >-
+          An array of XName/ID values for the components managed by the lock.
+          These components will have their component flag set to "Locked" upon
+          lock creation and set to "OK" upon lock deletion.
+ type: array + items: + $ref: '#/definitions/XNameRW.1.0.0' + example: + - x1c0s1b0n0 + - x1c0s1b0n1 + - x1c0s2b0n0 + - x1c0s2b0n1 + type: object + required: + - owner + - lifetime + - xnames + Lock.1.0.0_Patch: + description: >- + To update the reason, owner, and/or lifetime fields, a PATCH operation can + be used. Omitted fields are not updated. + NOTE: Updating the lifetime field renews the lock. The new expiration time + is the lifetime length AFTER the update. The creation timestamp is + updated. + properties: + reason: + description: >- + A one-line, user-provided reason for the lock. + type: string + example: For firmware update + owner: + description: >- + A user-provided self identifier for the lock (service.JobID) + type: string + example: FUS.25 + lifetime: + description: >- + The length of time in seconds the component lock should exist before + it is automatically deleted by HSM. + type: integer + example: 90 + type: object + ########################################################################## + # + # Locking v2 structures. + # + ########################################################################## + + # Admin + AdminLock.1.0.0: + properties: + ComponentIDs: + description: >- + An array of XName/ID values for the components to query. + items: + $ref: '#/definitions/XNameForQuery.1.0.0' + type: array + Partition: + description: >- + Partition name to filter on, as per current /partitions/names + items: + type: string + example: p1 + type: array + Group: + description: >- + Group label to filter on, as per current /groups/labels + items: + type: string + example: group_label + type: array + Type: + description: >- + Retrieve all components with the given HMS type. + items: + type: string + type: array + State: + description: >- + Retrieve all components with the given HMS state. + items: + $ref: '#/definitions/HMSState.1.0.0' + type: array + Flag: + description: >- + Retrieve all components with the given HMS flag value. + items: + $ref: '#/definitions/HMSFlag.1.0.0' + type: array + Enabled: + description: >- + Retrieve all components with the given enabled status (true or false). + items: + type: string + type: array + Softwarestatus: + description: >- + Retrieve all components with the given software status. + Software status is a free form string. Matching is case-insensitive. + items: + type: string + type: array + Role: + description: >- + Retrieve all components (i.e. nodes) with the given HMS role + items: + $ref: '#/definitions/HMSRole.1.0.0' + type: array + Subrole: + description: >- + Retrieve all components (i.e. nodes) with the given HMS subrole + items: + $ref: '#/definitions/HMSSubRole.1.0.0' + type: array + Subtype: + description: >- + Retrieve all components with the given HMS subtype. + items: + type: string + type: array + Arch: + description: >- + Retrieve all components with the given architecture. + items: + $ref: '#/definitions/HMSArch.1.0.0' + type: array + Class: + description: >- + Retrieve all components (i.e. nodes) with the given HMS hardware class. Class can be + River, Mountain, etc. + items: + $ref: '#/definitions/HMSClass.1.0.0' + type: array + NID: + description: >- + Retrieve all components (i.e. one node) with the given integer NID + items: + type: string + type: array + ProcessingModel: + type: string + enum: + - rigid + - flexible + description: Rigid is all or nothing, flexible is best attempt. 
+ type: object + AdminReservationRemove.1.0.0: + properties: + ComponentIDs: + description: >- + An array of XName/ID values for the components to query. + items: + $ref: '#/definitions/XNameForQuery.1.0.0' + type: array + Partition: + description: >- + Partition name to filter on, as per current /partitions/names + items: + type: string + example: p1 + type: array + Group: + description: >- + Group label to filter on, as per current /groups/labels + items: + type: string + example: group_label + type: array + Type: + description: >- + Retrieve all components with the given HMS type. + items: + type: string + type: array + State: + description: >- + Retrieve all components with the given HMS state. + items: + $ref: '#/definitions/HMSState.1.0.0' + type: array + Flag: + description: >- + Retrieve all components with the given HMS flag value. + items: + $ref: '#/definitions/HMSFlag.1.0.0' + type: array + Enabled: + description: >- + Retrieve all components with the given enabled status (true or false). + items: + type: string + type: array + Softwarestatus: + description: >- + Retrieve all components with the given software status. + Software status is a free form string. Matching is case-insensitive. + items: + type: string + type: array + Role: + description: >- + Retrieve all components (i.e. nodes) with the given HMS role + items: + $ref: '#/definitions/HMSRole.1.0.0' + type: array + Subrole: + description: >- + Retrieve all components (i.e. nodes) with the given HMS subrole + items: + $ref: '#/definitions/HMSSubRole.1.0.0' + type: array + Subtype: + description: >- + Retrieve all components with the given HMS subtype. + items: + type: string + type: array + Arch: + description: >- + Retrieve all components with the given architecture. + items: + $ref: '#/definitions/HMSArch.1.0.0' + type: array + Class: + description: >- + Retrieve all components (i.e. nodes) with the given HMS hardware class. Class can be + River, Mountain, etc. + items: + $ref: '#/definitions/HMSClass.1.0.0' + type: array + NID: + description: >- + Retrieve all components (i.e. one node) with the given integer NID + items: + type: string + type: array + ProcessingModel: + type: string + enum: + - rigid + - flexible + description: Rigid is all or nothing, flexible is best attempt. + type: object + AdminStatusCheck_Response.1.0.0: + type: object + properties: + Components: + items: + $ref: '#/definitions/ComponentStatus.1.0.0' + type: array + NotFound: + type: array + items: + type: string + example: x1000c0s0b0 + AdminReservationCreate_Response.1.0.0: + type: object + properties: + Success: + items: + $ref: '#/definitions/XnameKeysNoExpire.1.0.0' + type: array + Failure: + type: array + items: + $ref: '#/definitions/FailedXnames.1.0.0' + AdminReservationCreate.1.0.0: + properties: + ComponentIDs: + description: >- + An array of XName/ID values for the components to query. + items: + $ref: '#/definitions/XNameForQuery.1.0.0' + type: array + Partition: + description: >- + Partition name to filter on, as per current /partitions/names + items: + type: string + example: p1 + type: array + Group: + description: >- + Group label to filter on, as per current /groups/labels + items: + type: string + example: group_label + type: array + Type: + description: >- + Retrieve all components with the given HMS type. + items: + type: string + type: array + State: + description: >- + Retrieve all components with the given HMS state. 
+ items: + $ref: '#/definitions/HMSState.1.0.0' + type: array + Flag: + description: >- + Retrieve all components with the given HMS flag value. + items: + $ref: '#/definitions/HMSFlag.1.0.0' + type: array + Enabled: + description: >- + Retrieve all components with the given enabled status (true or false). + items: + type: string + type: array + Softwarestatus: + description: >- + Retrieve all components with the given software status. + Software status is a free form string. Matching is case-insensitive. + items: + type: string + type: array + Role: + description: >- + Retrieve all components (i.e. nodes) with the given HMS role + items: + $ref: '#/definitions/HMSRole.1.0.0' + type: array + Subrole: + description: >- + Retrieve all components (i.e. nodes) with the given HMS subrole + items: + $ref: '#/definitions/HMSSubRole.1.0.0' + type: array + Subtype: + description: >- + Retrieve all components with the given HMS subtype. + items: + type: string + type: array + Arch: + description: >- + Retrieve all components with the given architecture. + items: + $ref: '#/definitions/HMSArch.1.0.0' + type: array + Class: + description: >- + Retrieve all components (i.e. nodes) with the given HMS hardware class. Class can be + River, Mountain, etc. + items: + $ref: '#/definitions/HMSClass.1.0.0' + type: array + NID: + description: >- + Retrieve all components (i.e. one node) with the given integer NID + items: + type: string + type: array + ProcessingModel: + type: string + enum: + - rigid + - flexible + description: Rigid is all or nothing, flexible is best attempt. + type: object + # Service + ServiceReservationCreate.1.0.0: + properties: + ComponentIDs: + description: >- + An array of XName/ID values for the components to query. + items: + $ref: '#/definitions/XNameForQuery.1.0.0' + type: array + Partition: + description: >- + Partition name to filter on, as per current /partitions/names + items: + type: string + example: p1 + type: array + Group: + description: >- + Group label to filter on, as per current /groups/labels + items: + type: string + example: group_label + type: array + Type: + description: >- + Retrieve all components with the given HMS type. + items: + type: string + type: array + State: + description: >- + Retrieve all components with the given HMS state. + items: + $ref: '#/definitions/HMSState.1.0.0' + type: array + Flag: + description: >- + Retrieve all components with the given HMS flag value. + items: + $ref: '#/definitions/HMSFlag.1.0.0' + type: array + Enabled: + description: >- + Retrieve all components with the given enabled status (true or false). + items: + type: string + type: array + Softwarestatus: + description: >- + Retrieve all components with the given software status. + Software status is a free form string. Matching is case-insensitive. + items: + type: string + type: array + Role: + description: >- + Retrieve all components (i.e. nodes) with the given HMS role + items: + $ref: '#/definitions/HMSRole.1.0.0' + type: array + Subrole: + description: >- + Retrieve all components (i.e. nodes) with the given HMS subrole + items: + $ref: '#/definitions/HMSSubRole.1.0.0' + type: array + Subtype: + description: >- + Retrieve all components with the given HMS subtype. + items: + type: string + type: array + Arch: + description: >- + Retrieve all components with the given architecture. + items: + $ref: '#/definitions/HMSArch.1.0.0' + type: array + Class: + description: >- + Retrieve all components (i.e. nodes) with the given HMS hardware class. 
Class can be
+          River, Mountain, etc.
+        items:
+          $ref: '#/definitions/HMSClass.1.0.0'
+        type: array
+      NID:
+        description: >-
+          Retrieve all components (i.e. one node) with the given integer NID
+        items:
+          type: string
+        type: array
+      ProcessingModel:
+        type: string
+        enum:
+          - rigid
+          - flexible
+        description: Rigid is all or nothing, flexible is best attempt.
+      ReservationDuration:
+        type: integer
+        minimum: 1
+        maximum: 15
+        description: Length of time in minutes for the reservation to be valid.
+        default: 1
+        example: 1
+    type: object
+  ServiceReservationCreate_Response.1.0.0:
+    type: object
+    properties:
+      Success:
+        items:
+          $ref: '#/definitions/XnameKeys.1.0.0'
+        type: array
+      Failure:
+        type: array
+        items:
+          $ref: '#/definitions/FailedXnames.1.0.0'
+  ServiceReservationCheck_Response.1.0.0:
+    type: object
+    properties:
+      Success:
+        items:
+          $ref: '#/definitions/XnameKeysDeputyExpire.1.0.0'
+        type: array
+      Failure:
+        type: array
+        items:
+          $ref: '#/definitions/FailedXnames.1.0.0'
+
+  # Generic
+  Xnames:
+    type: object
+    properties:
+      ComponentIDs:
+        type: array
+        items:
+          type: string
+  XnameKeysNoExpire.1.0.0:
+    type: object
+    properties:
+      ID:
+        type: string
+      DeputyKey:
+        type: string
+        description: The key that can be passed to a delegate.
+      ReservationKey:
+        type: string
+        description: The key that can be used to renew/release the reservation. Should not be delegated or shared.
+  XnameKeys.1.0.0:
+    type: object
+    properties:
+      ID:
+        type: string
+      DeputyKey:
+        type: string
+        description: The key that can be passed to a delegate.
+      ReservationKey:
+        type: string
+        description: The key that can be used to renew/release the reservation. Should not be delegated or shared.
+      ExpirationTime:
+        type: string
+        format: date-time
+  XnameKeysDeputyExpire.1.0.0:
+    type: object
+    properties:
+      ID:
+        type: string
+      DeputyKey:
+        type: string
+        description: The key that can be passed to a delegate.
+      ExpirationTime:
+        type: string
+        format: date-time
+  XnameWithKey.1.0.0:
+    type: object
+    properties:
+      ID:
+        type: string
+      Key:
+        type: string
+  DeputyKeys.1.0.0:
+    type: object
+    properties:
+      DeputyKeys:
+        type: array
+        items:
+          $ref: '#/definitions/XnameWithKey.1.0.0'
+  ReservedKeys.1.0.0:
+    type: object
+    properties:
+      ReservationKeys:
+        type: array
+        items:
+          $ref: '#/definitions/XnameWithKey.1.0.0'
+      ProcessingModel:
+        type: string
+        enum:
+          - rigid
+          - flexible
+        description: Rigid is all or nothing, flexible is best attempt.
+  ReservedKeysWithRenewal.1.0.0:
+    type: object
+    properties:
+      ReservationKeys:
+        type: array
+        items:
+          $ref: '#/definitions/XnameWithKey.1.0.0'
+      ProcessingModel:
+        type: string
+        enum:
+          - rigid
+          - flexible
+        description: Rigid is all or nothing, flexible is best attempt.
+      ReservationDuration:
+        type: integer
+        minimum: 1
+        maximum: 15
+        description: Length of time in minutes for the reservation to be valid.
+        default: 1
+        example: 1
+  Counts.1.0.0:
+    type: object
+    properties:
+      Total:
+        type: integer
+      Success:
+        type: integer
+      Failure:
+        type: integer
+  FailedXnames.1.0.0:
+    type: object
+    properties:
+      ID:
+        type: string
+      Reason:
+        type: string
+        enum:
+          - NotFound
+          - Locked
+          - Disabled
+          - Reserved
+          - ServerError
+        description: The reason the operation failed for this component.
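+  # Example (illustrative sketch, not part of the spec): the v2 reservation
+  # structures above split credentials in two. The ReservationKey stays with
+  # the owner to renew or release a reservation; the DeputyKey can be handed
+  # to a delegate. A minimal Go sketch of the create flow follows; the host,
+  # the /hsm/v2/locks/service/reservations path, and the inline structs
+  # (standing in for generated hsm-client models) are assumptions.
+  #
+  #   package main
+  #
+  #   import (
+  #       "bytes"
+  #       "encoding/json"
+  #       "fmt"
+  #       "net/http"
+  #   )
+  #
+  #   func main() {
+  #       // ServiceReservationCreate.1.0.0 payload: "flexible" reserves what
+  #       // it can; "rigid" is all or nothing.
+  #       payload, _ := json.Marshal(map[string]interface{}{
+  #           "ComponentIDs":        []string{"x1c0s1b0n0", "x1c0s1b0n1"},
+  #           "ProcessingModel":     "flexible",
+  #           "ReservationDuration": 1, // minutes, 1-15 per the schema
+  #       })
+  #       resp, err := http.Post("https://hsm.example.com/hsm/v2/locks/service/reservations",
+  #           "application/json", bytes.NewReader(payload))
+  #       if err != nil {
+  #           panic(err)
+  #       }
+  #       defer resp.Body.Close()
+  #       // ServiceReservationCreate_Response.1.0.0: per-xname keys on
+  #       // success, per-xname reasons on failure.
+  #       var out struct {
+  #           Success []struct{ ID, DeputyKey, ReservationKey, ExpirationTime string }
+  #           Failure []struct{ ID, Reason string }
+  #       }
+  #       if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
+  #           panic(err)
+  #       }
+  #       for _, r := range out.Success {
+  #           fmt.Printf("%s reserved until %s\n", r.ID, r.ExpirationTime)
+  #       }
+  #   }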
+  ComponentStatus.1.0.0:
+    type: object
+    properties:
+      ID:
+        type: string
+        example: x1001c0s0b0
+      Locked:
+        type: boolean
+        example: false
+      Reserved:
+        type: boolean
+        example: true
+      CreatedTime:
+        type: string
+        format: date-time
+      ExpirationTime:
+        type: string
+        format: date-time
+      ReservationDisabled:
+        type: boolean
+        example: false
+  XnameResponse_1.0.0:
+    description: >-
+      This is a simple CAPMC-like response, intended mainly for
+      non-error messages. For client errors, we now use RFC7807 responses.
+    type: object
+    properties:
+      Counts:
+        $ref: '#/definitions/Counts.1.0.0'
+      Success:
+        $ref: '#/definitions/Xnames'
+      Failure:
+        type: array
+        items:
+          $ref: '#/definitions/FailedXnames.1.0.0'
+
+
+  ##########################################################################
+  #
+  # Power Map structures - Component to power supply mapping.
+  #
+  ##########################################################################
+  PowerMap.1.0.0_PowerMap:
+    description: >-
+      PowerMaps are used to show which components are powered by which power
+      supplies.
+    properties:
+      id:
+        description: >-
+          The component this PowerMap is for.
+        $ref: '#/definitions/XName.1.0.0'
+      poweredBy:
+        description: >-
+          A list of components that supply this component with power.
+        items:
+          $ref: '#/definitions/XNameRW.1.0.0'
+        type: array
+    required:
+      - poweredBy
+    type: object
+    example:
+      id: x0c0s1b0n0
+      poweredBy:
+        - x0m0p0j10
+        - x0m0p0j11
+  PowerMap.1.0.0_PostPowerMap:
+    description: >-
+      PowerMaps are used to show which components are powered by which power
+      supplies.
+    properties:
+      id:
+        description: >-
+          The component this PowerMap is for.
+        $ref: '#/definitions/XNameRW.1.0.0'
+      poweredBy:
+        description: >-
+          A list of components that supply this component with power.
+        items:
+          $ref: '#/definitions/XNameRW.1.0.0'
+        type: array
+    required:
+      - id
+      - poweredBy
+    type: object
+    example:
+      id: x0c0s1b0n0
+      poweredBy:
+        - x0m0p0j10
+        - x0m0p0j11
+  PowerMapArray_PowerMapArray:
+    description: >-
+      This is an array of PowerMap objects. This is the result of
+      GET-ing the PowerMaps collection, or can be used to populate or
+      update it as input provided via POST.
+    items:
+      $ref: '#/definitions/PowerMap.1.0.0_PostPowerMap'
+    type: array
+  ##########################################################################
+  #
+  # Service Values Response Structures
+  #
+  ##########################################################################
+  Values.1.0.0_Values:
+    description: >-
+      This is a list of parameters and their valid values. These
+      values are valid for various parameters in this API.
+    allOf:
+      - $ref: '#/definitions/Values.1.0.0_ArchArray'
+      - $ref: '#/definitions/Values.1.0.0_ClassArray'
+      - $ref: '#/definitions/Values.1.0.0_FlagArray'
+      - $ref: '#/definitions/Values.1.0.0_NetTypeArray'
+      - $ref: '#/definitions/Values.1.0.0_RoleArray'
+      - $ref: '#/definitions/Values.1.0.0_SubRoleArray'
+      - $ref: '#/definitions/Values.1.0.0_StateArray'
+      - $ref: '#/definitions/Values.1.0.0_TypeArray'
+  Values.1.0.0_ArchArray:
+    description: >-
+      This is an array of valid HMSArch values. These values are valid for
+      any 'arch' parameter in this API.
+    properties:
+      Arch:
+        items:
+          $ref: '#/definitions/HMSArch.1.0.0'
+        type: array
+  Values.1.0.0_ClassArray:
+    description: >-
+      This is an array of valid HMSClass values. These values are valid for
+      any 'class' parameter in this API.
+    properties:
+      Class:
+        items:
+          $ref: '#/definitions/HMSClass.1.0.0'
+        type: array
+  Values.1.0.0_FlagArray:
+    description: >-
+      This is an array of valid HMSFlag values. These values are valid for
+      any 'flag' parameter in this API.
+    properties:
+      Flag:
+        items:
+          $ref: '#/definitions/HMSFlag.1.0.0'
+        type: array
+  Values.1.0.0_NetTypeArray:
+    description: >-
+      This is an array of valid NetType values. These values are valid for
+      any 'nettype' parameter in this API.
+    properties:
+      NetType:
+        items:
+          $ref: '#/definitions/NetType.1.0.0'
+        type: array
+  Values.1.0.0_RoleArray:
+    description: >-
+      This is an array of valid HMSRole values. These values are valid for
+      any 'role' parameter in this API.
+    properties:
+      Role:
+        items:
+          $ref: '#/definitions/HMSRole.1.0.0'
+        type: array
+  Values.1.0.0_SubRoleArray:
+    description: >-
+      This is an array of valid HMSSubRole values. These values are valid for
+      any 'subrole' parameter in this API.
+    properties:
+      SubRole:
+        items:
+          $ref: '#/definitions/HMSSubRole.1.0.0'
+        type: array
+  Values.1.0.0_StateArray:
+    description: >-
+      This is an array of valid HMSState values. These values are valid for
+      any 'state' parameter in this API.
+    properties:
+      State:
+        items:
+          $ref: '#/definitions/HMSState.1.0.0'
+        type: array
+  Values.1.0.0_TypeArray:
+    description: >-
+      This is an array of valid HMSType values. These values are valid for
+      any 'type' parameter in this API.
+    properties:
+      Type:
+        items:
+          $ref: '#/definitions/HMSType.1.0.0'
+        type: array
+  ##########################################################################
+  #
+  # General definitions
+  #
+  ##########################################################################
+  Actions_1.0.0_ChassisActions:
+    description: >-
+      This is a pass-through field from Redfish that lists the available
+      actions for a Chassis component (if any were found, else it will
+      be omitted entirely).
+    properties:
+      '#Chassis.Reset':
+        properties:
+          "ResetType@Redfish.AllowableValues":
+            description: List of allowable 'reset' Redfish Action types
+            items:
+              type: string
+            type: array
+            example: [ "On", "ForceOff" ]
+          target:
+            description: target URI for Redfish Action
+            type: string
+            example: /redfish/v1/Chassis/RackEnclosure/Actions/Chassis.Reset
+        type: object
+    type: object
+    readOnly: true
+  Actions_1.0.0_ComputerSystemActions:
+    description: >-
+      This is a pass-through field from Redfish that lists the available
+      actions for a System component (if any were found, else it will
+      be omitted entirely).
+    properties:
+      '#ComputerSystem.Reset':
+        properties:
+          "ResetType@Redfish.AllowableValues":
+            description: List of allowable 'reset' Redfish Action types
+            items:
+              type: string
+            type: array
+            example: [ "On", "ForceOff", "ForceRestart" ]
+          target:
+            description: target URI for Redfish Action
+            type: string
+            example: /redfish/v1/Systems/System.1/Actions/ComputerSystem.Reset
+        type: object
+    type: object
+    readOnly: true
+  Actions_1.0.0_ManagerActions:
+    description: >-
+      This is a pass-through field from Redfish that lists the available
+      actions for a Manager component (if any were found, else it will
+      be omitted entirely).
+    properties:
+      '#Manager.Reset':
+        properties:
+          "ResetType@Redfish.AllowableValues":
+            description: List of allowable 'reset' Redfish Action types
+            items:
+              type: string
+            type: array
+            example: [ "ForceRestart" ]
+          target:
+            description: target URI for Redfish Action
+            type: string
+            example: /redfish/v1/Managers/BMC/Actions/Manager.Reset
+        type: object
+    type: object
+    readOnly: true
+  Actions_1.0.0_OutletActions:
+    description: >-
+      This is a pass-through field from Redfish that lists the available
+      actions for an Outlet component (if any were found, else it will
+      be omitted entirely).
+    properties:
+      '#Outlet.PowerControl':
+        properties:
+          "PowerControl@Redfish.AllowableValues":
+            description: List of allowable PowerControl Redfish Action types
+            items:
+              type: string
+            type: array
+            example: [ "On" ]
+          target:
+            description: target URI for Redfish Action
+            type: string
+            example:
+              "/redfish/v1/PowerEquipment/RackPDUs/1/Outlets/A1\
+              /Outlet.PowerControl"
+        type: object
+      '#Outlet.ResetBreaker':
+        properties:
+          "ResetBreaker@Redfish.AllowableValues":
+            description: List of allowable ResetBreaker Redfish Action types
+            items:
+              type: string
+            type: array
+            example: [ "Off" ]
+          target:
+            description: target URI for Redfish Action
+            type: string
+            example:
+              "/redfish/v1/PowerEquipment/RackPDUs/1/Outlets/A1\
+              /Outlet.ResetBreaker"
+        type: object
+      '#Outlet.ResetStatistics':
+        properties:
+          "ResetStatistics@Redfish.AllowableValues":
+            description:
+              List of allowable ResetStatistics Redfish Action types
+            items:
+              type: string
+            type: array
+          target:
+            description: target URI for Redfish Action
+            type: string
+            example:
+              "/redfish/v1/PowerEquipment/RackPDUs/1/Outlets/A1\
+              /Outlet.ResetStatistics"
+        type: object
+    type: object
+    readOnly: true
+  Message_1.0.0_ExtendedInfo:
+    # TODO: Generalize this so we use the same messages/errors throughout
+    # HMS and via outside interfaces such as CMBus. They should be ReSTy
+    # etc.
+    description: >-
+      TODO This is a general message scheme meant to replace and generalize
+      old HSS error codes. Largely TBD placeholder.
+    properties:
+      ID:
+        description: Formal, machine-readable name for the message.
+        type: string
+      Message:
+        description: Human-readable description of the message.
+        type: string
+      Flag:
+        # OK, Warning, or Alert - Indicates message, warning, or error.
+        $ref: '#/definitions/HMSFlag.1.0.0'
+    type: object
+  EthernetNICInfo_1.0.0:
+    description: >-
+      This is summary info for one ordinary Ethernet NIC (i.e. not on HSN).
+      These fields are all passed through from a Redfish EthernetInterface
+      object.
+    properties:
+      RedfishId:
+        description: The Redfish 'Id' field for the interface.
+        type: string
+        readOnly: true
+        example: 1
+      "@odata.id":
+        description: >-
+          This is the relative path to the EthernetInterface via the
+          Redfish entry point. (i.e. the @odata.id field).
+        type: string
+        readOnly: true
+        example:
+          /redfish/v1/{Chassis/Managers/Systems}/{Id}/EthernetInterfaces/1
+      Description:
+        description: The Redfish 'Description' field for the interface.
+        type: string
+        readOnly: true
+        example: Integrated NIC 1
+      FQDN:
+        description: >-
+          The Redfish 'FQDN' of the interface. This may or may not be set
+          and is not necessarily the same as the FQDN of the ComponentEndpoint.
+        type: string
+        readOnly: true
+      Hostname:
+        description: >-
+          The Redfish 'Hostname' field for the interface. This may or may
+          not be set and is not necessarily the same as the Hostname of the
+          ComponentEndpoint.
+        type: string
+        readOnly: true
+      InterfaceEnabled:
+        description: >-
+          The Redfish 'InterfaceEnabled' field if provided by Redfish, else
+          it will be omitted.
+        type: boolean
+        readOnly: true
+      MACAddress:
+        description: >-
+          The Redfish 'MacAddress' field for the interface. This should
+          normally be set but is not necessarily the same as the MacAddr of
+          the ComponentEndpoint (as there may be multiple interfaces).
+        pattern: '^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$'
+        type: string
+        example: ae:12:ce:7a:aa:99
+      PermanentMACAddress:
+        description: >-
+          The Redfish 'PermanentMacAddress' field for the interface. This may
+          or may not be set and is not necessarily the same as the MacAddr
+          of the ComponentEndpoint (as there may be multiple interfaces).
+        pattern: '^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$'
+        type: string
+        example: ae:12:ce:7a:aa:99
+    type: object
+  PowerControl_1.0.0:
+    description: >-
+      This is the power control info for the node. These fields are all passed
+      through from a Redfish PowerControl object.
+    properties:
+      Name:
+        description: Name of the power control interface.
+        type: string
+        readOnly: true
+        example: Node Power Control
+      PowerCapacityWatts:
+        description: >-
+          The total amount of power available to the chassis for allocation.
+          This may be the power supply capacity, or the power budget assigned
+          to the chassis from an up-stream chassis.
+        type: number
+        readOnly: true
+        example: 900
+      OEM:
+        description: >-
+          This is the manufacturer/provider specific extension moniker used to
+          divide the Oem object into sections.
+        type: object
+        readOnly: true
+        properties:
+          Cray:
+            description: >-
+              This is the manufacturer/provider specific extension moniker used
+              to divide the Oem object into sections.
+            type: object
+            readOnly: true
+            properties:
+              PowerIdleWatts:
+                description: >-
+                  The total amount of power available to the chassis for
+                  allocation. This may be the power supply capacity, or the
+                  power budget assigned to the chassis from an up-stream
+                  chassis.
+                type: number
+                readOnly: true
+                example: 900
+      PowerLimit:
+        description: >-
+          Power limit status and configuration information for this chassis.
+        type: object
+        readOnly: true
+        properties:
+          Min:
+            description: >-
+              The minimum allowed value for a PowerLimit's LimitInWatts. This is
+              the estimated lowest value (most restrictive) power cap that can be
+              achieved by the associated PowerControl resource.
+            type: number
+            readOnly: true
+            example: 350
+          Max:
+            description: >-
+              The maximum allowed value for a PowerLimit's LimitInWatts. This is
+              the estimated highest value (least restrictive) power cap that can
+              be achieved by the associated PowerControl resource. Note that the
+              actual maximum allowed LimitInWatts is the lesser of PowerLimit.Max
+              or PowerControl.PowerAllocatedWatts.
+            type: number
+            readOnly: true
+            example: 850
+      PowerResetWatts:
+        description: >-
+          Typical power consumption during ComputerSystem.ResetAction "On"
+          operation.
+        type: number
+        readOnly: true
+        example: 250
+      RelatedItem:
+        description: >-
+          The ID(s) of the resources associated with this Power Limit.
+        type: array
+        items:
+          type: object
+          properties:
+            "@odata.id":
+              description: >-
+                An ID of the resource associated with this Power Limit.
+              type: string
+              readOnly: true
+              example: /redfish/v1/Chassis/Node0/Power#/PowerControl/Accelerator0
+        readOnly: true
+    type: object
+  FRUId.1.0.0:
+    description: >-
+      Uniquely identifies a piece of hardware by a serial-number like
+      identifier that is globally unique within the hardware inventory.
+    type: string
+    readOnly: true
+  HMSArch.1.0.0:
+    description: >-
+      This is the basic architecture of the component so the proper software
+      can be selected and so on.
+    enum:
+      - X86
+      - ARM
+      - Other
+    type: string
+    example: X86
+  HMSClass.1.0.0:
+    description: >-
+      This is the HSM hardware class of the component.
+    enum:
+      - River
+      - Mountain
+      - Hill
+    type: string
+    example: River
+  HMSFlag.1.0.0:
+    description: >-
+      This property indicates the state flag of the underlying component.
+    enum:
+      - OK
+      - Warning
+      - Alert
+      - Locked
+    type: string
+    example: OK
+  HMSRole.1.0.0:
+    description: >-
+      This is a possibly reconfigurable role for a component, especially a node.
+      Valid values are:
+
+      - Compute
+
+      - Service
+
+      - System
+
+      - Application
+
+      - Storage
+
+      - Management
+
+      Additional valid values may be added via configuration file.
+      See the results of 'GET /service/values/role' for the complete list.
+    type: string
+    example: Compute
+  HMSSubRole.1.0.0:
+    description: >-
+      This is a possibly reconfigurable subrole for a component, especially a node.
+      Valid values are:
+
+      - Master
+
+      - Worker
+
+      - Storage
+
+      Additional valid values may be added via configuration file.
+      See the results of 'GET /service/values/subrole' for the complete list.
+    type: string
+    example: Worker
+  HMSState.1.0.0:
+    description: >-
+      This property indicates the state of the underlying component.
+    enum:
+      - Unknown
+      - Empty
+      - Populated
+      - "Off"
+      - "On"
+      - Standby
+      - Halt
+      - Ready
+    type: string
+    example: Ready
+  HMSType.1.0.0:
+    description: >-
+      This is the HMS component type category. It has a particular xname
+      format and represents the kind of component that can occupy that
+      location. Not to be confused with RedfishType, which is Redfish
+      specific and only used when providing Redfish endpoint data from
+      discovery.
+    enum:
+      - CDU
+      - CabinetCDU
+      - CabinetPDU
+      - CabinetPDUOutlet
+      - CabinetPDUPowerConnector
+      - CabinetPDUController
+      - Cabinet
+      - Chassis
+      - ChassisBMC
+      - CMMRectifier
+      - CMMFpga
+      - CEC
+      - ComputeModule
+      - RouterModule
+      - NodeBMC
+      - NodeEnclosure
+      - NodeEnclosurePowerSupply
+      - HSNBoard
+      - MgmtSwitch
+      - MgmtHLSwitch
+      - CDUMgmtSwitch
+      - Node
+      - Processor
+      - Drive
+      - StorageGroup
+      - NodeNIC
+      - Memory
+      - NodeAccel
+      - NodeAccelRiser
+      - NodeFpga
+      - HSNAsic
+      - RouterFpga
+      - RouterBMC
+      - HSNLink
+      - HSNConnector
+      - INVALID
+    type: string
+    readOnly: true
+    example: Node
+  NetType.1.0.0:
+    description: >-
+      This is the type of high speed network the component is connected to,
+      if it is an applicable component type and the interface is present, or
+      the type of the system HSN.
+    enum:
+      - Sling
+      - Infiniband
+      - Ethernet
+      - OEM
+      - None
+    type: string
+    example: Sling
+  NIDRange.1.0.0:
+    description: >-
+      NID range values to query matching components, e.g. "0-24". Supply
+      only a single range; more can be given in an array of these values.
+    type: string
+    example: "0-24"
+  NICAddrs.1.0.0:
+    description: A collection of HSN NIC addresses in string form.
+    items:
+      type: string
+    type: array
+    example: [ 0x234e12, 0xaf3f12 ]
+  OdataID.1.0.0:
+    description: >-
+      This is the path (relative to a Redfish endpoint) of a particular
+      Redfish resource, e.g. /redfish/v1/Systems/System.Embedded.1
+    type: string
+    readOnly: true
+    example: /redfish/v1/Systems/System.Embedded.1
+  Problem7807:
+    description: >-
+      RFC 7807 compliant error payload. All fields are optional except
+      the 'type' field.
+    type: object
+    required:
+      - type
+    properties:
+      type:
+        type: string
+        example: about:blank
+      detail:
+        type: string
+        example: Detail about this specific problem occurrence. See RFC7807
+      instance:
+        type: string
+        example: ""
+      status:
+        type: number
+        format: int32
+        example: 400
+      title:
+        type: string
+        example: Description of HTTP Status code, e.g. 400
+  RedfishType.1.0.0:
+    description: >-
+      This is the Redfish object type, not to be confused with the HMS
+      component type.
+    enum:
+      - Chassis
+      - ComputerSystem
+      - EthernetInterface
+      - Manager
+      - Memory
+      - Processor
+      - Drive
+      - PowerSupply
+      - AccountService
+      - EventService
+      - LogService
+      - SessionService
+      - TaskService
+      - UpdateService
+    type: string
+    readOnly: true
+    example: ComputerSystem
+  RedfishSubtype.1.0.0:
+    description: >-
+      This is the type corresponding to the Redfish object type, i.e. the
+      ChassisType, SystemType, and ManagerType fields. We only use
+      these three types to create ComponentEndpoints for now.
+    enum:
+      - Rack
+      - Blade
+      - Enclosure
+      - StandAlone
+      - RackMount
+      - Card
+      - Cartridge
+      - Row
+      - Pod
+      - Expansion
+      - Sidecar
+      - Zone
+      - Sled
+      - Shelf
+      - Drawer
+      - Module
+      - Component
+      - Other
+      - Physical
+      - Virtual
+      - OS
+      - PhysicallyPartitioned
+      - VirtuallyPartitioned
+      - ManagementController
+      - EnclosureManager
+      - BMC
+      - RackManager
+      - AuxiliaryController
+    type: string
+    readOnly: true
+    example: Physical
+  ResourceName:
+    description: >-
+      Acceptable format for certain user-requested string identifiers.
+    type: string
+    pattern: '^[a-z0-9_\-.]{1,}$'
+    example: resource_name1
+  ResourceURI.1.0.0:
+    description: >-
+      A ResourceURI is like an odata.id, it provides a path to a resource
+      from the API root, such that when a GET is performed, the corresponding
+      object is returned. It does not imply other odata functionality.
+    type: object
+    properties:
+      ResourceURI:
+        type: string
+        example: /hsm/v2/API_TYPE/OBJECT_TYPE/OBJECT_ID
+  ResourceURICollection_ResourceURICollection:
+    description: >-
+      A ResourceURI is like an odata.id, it provides a path to a resource
+      from the API root, such that when a GET is performed, the corresponding
+      object is returned. It does not imply other odata functionality. This
+      is a collection of such IDs, of a single base type, grouped together
+      for some purpose.
+    properties:
+      Name:
+        description: >-
+          Should describe the collection, though the type of resources
+          the links correspond to should also be inferred from the context
+          in which the collection was obtained.
+        type: string
+        readOnly: true
+        example: (Type of Object) Collection
+      Members:
+        description: An array of ResourceIds.
+        items:
+          $ref: '#/definitions/ResourceURI.1.0.0'
+        type: array
+        readOnly: true
+      MemberCount:
+        description: Number of ResourceURIs in the collection
+        type: number
+        format: int32
+        readOnly: true
+    type: object
+  Response_1.0.0:
+    description: >-
+      This is a simple CAPMC-like response, intended mainly for
+      non-error messages. For client errors, we now use RFC7807 responses.
+    type: object
+    required:
+      - code
+      - message
+    properties:
+      code:
+        type: string
+      message:
+        type: string
+  UUID.1.0.0:
+    description: >-
+      This is a universally unique identifier, i.e. a UUID in the canonical
+      format provided by Redfish to identify endpoints and services.
+      If this is the UUID of a RedfishEndpoint, it should be the UUID
+      broadcast by SSDP, if applicable.
+    pattern: '([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})'
+    type: string
+    readOnly: true
+    example: bf9362ad-b29c-40ed-9881-18a5dba3a26b
+  XName.1.0.0:
+    description: >-
+      Uniquely identifies the component by its physical location (xname).
+      There are formatting rules depending on the matching HMSType.
+    type: string
+    example: x0c0s0b0n0
+    readOnly: true
+  XNameRW.1.0.0:
+    description: >-
+      Uniquely identifies the component by its physical location (xname).
+      There are formatting rules depending on the matching HMSType.
+      This is the non-readOnly version for writable component lists.
+    type: string
+    example: x0c0s1b0n0
+  XNameRFEndpoint.1.0.0:
+    description: >-
+      Uniquely identifies the component by its physical location (xname).
+      This is identical to a normal XName, but specifies a case where
+      a BMC or other controller type is expected.
+    type: string
+    example: x0c0s0b0
+  XNameForQuery.1.0.0:
+    description: >-
+      Uniquely identifies the component by its physical location (xname).
+      There are formatting rules depending on the matching HMSType.
+      This is identical to XName except that it is not read-only; being
+      read-only would prevent it from being a required parameter in query
+      operations in Swagger 2.0. These operations do not actually write
+      the XName, merely use it as a selector to do bulk writes of multiple
+      records, so this is fine.
+    type: string
+    example: x0c0s0b0n0
+  XNamePartition.1.0.0:
+    description: >-
+      This is an ordinary xname, but one where only a partition (hard:soft)
+      or the system alias (s0) will be expected as valid input.
+    type: string
+    example: p1.2
+  XNameCompOrPartition.1.0.0:
+    description: >-
+      This is an ordinary xname, but one where only a partition (hard:soft)
+      or the system alias (s0) will be expected as valid input, or else
+      a parent component.
+    type: string
+    example: s0
+parameters:
+  compIDParam:
+    name: id
+    in: query
+    type: string
+    #TODO: Define these kinds of parameters as arrays with
+    #      collectionFormat=multi once craycli can handle it correctly.
+    #  type: array
+    #  items:
+    #    type: string
+    #  collectionFormat: multi
+    description: >-
+      Filter the results based on xname ID(s). Can be specified multiple times
+      for selecting entries with multiple specific xnames.
+  compTypeParam:
+    name: type
+    in: query
+    type: string
+    description: >-
+      Filter the results based on HMS type like Node, NodeEnclosure, NodeBMC etc.
+      Can be specified multiple times for selecting entries of multiple types.
+ enum: + - CDU + - CabinetCDU + - CabinetPDU + - CabinetPDUOutlet + - CabinetPDUPowerConnector + - CabinetPDUController + - Cabinet + - Chassis + - ChassisBMC + - CMMRectifier + - CMMFpga + - CEC + - ComputeModule + - RouterModule + - NodeBMC + - NodeEnclosure + - NodeEnclosurePowerSupply + - HSNBoard + - MgmtSwitch + - MgmtHLSwitch + - CDUMgmtSwitch + - Node + - Processor + - Drive + - StorageGroup + - NodeNIC + - Memory + - NodeAccel + - NodeAccelRiser + - NodeFpga + - HSNAsic + - RouterFpga + - RouterBMC + - HSNLink + - HSNConnector + - INVALID + compStateParam: + name: state + in: query + type: string + description: >- + Filter the results based on HMS state like Ready, On etc. + Can be specified multiple times for selecting entries in different states. + enum: + - Unknown + - Empty + - Populated + - "Off" + - "On" + - Standby + - Halt + - Ready + compFlagParam: + name: flag + in: query + type: string + description: >- + Filter the results based on HMS flag value like OK, Alert etc. + Can be specified multiple times for selecting entries with different flags. + enum: + - OK + - Warning + - Alert + - Locked + - Unknown + compRoleParam: + name: role + in: query + type: string + description: >- + Filter the results based on HMS role. Can be specified multiple + times for selecting entries with different roles. + Valid values are: + + - Compute + + - Service + + - System + + - Application + + - Storage + + - Management + + Additional valid values may be added via configuration file. + See the results of 'GET /service/values/role' for the complete list. + compSubroleParam: + name: subrole + in: query + type: string + description: >- + Filter the results based on HMS subrole. Can be specified multiple + times for selecting entries with different subroles. + Valid values are: + + - Master + + - Worker + + - Storage + + Additional valid values may be added via configuration file. + See the results of 'GET /service/values/subrole' for the complete list. + compEnabledParam: + name: enabled + in: query + type: string + description: >- + Filter the results based on enabled status (true or false). + compSoftwareStatusParam: + name: softwarestatus + in: query + type: string + description: >- + Filter the results based on software status. Software status is a free form string. + Matching is case-insensitive. Can be specified multiple times for selecting entries + with different software statuses. + compSubtypeParam: + name: subtype + in: query + type: string + description: >- + Filter the results based on HMS subtype. Can be specified multiple times for selecting + entries with different subtypes. + compArchParam: + name: arch + in: query + type: string + description: >- + Filter the results based on architecture. Can be specified multiple times for + selecting components with different architectures. + enum: + - X86 + - ARM + - Other + - Unknown + compClassParam: + name: class + in: query + type: string + description: >- + Filter the results based on HMS hardware class. Can be specified multiple times for + selecting entries with different classes. + enum: + - River + - Mountain + - Hill + compNIDParam: + name: nid + in: query + type: string + description: >- + Filter the results based on NID. Can be specified multiple times for + selecting entries with multiple specific NIDs. + compNIDStartParam: + name: nid_start + in: query + type: string + description: >- + Filter the results based on NIDs equal to or greater than the provided integer. 
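+  # Example (illustrative sketch, not part of the spec): these filter
+  # parameters compose. Repeating the same parameter widens the match across
+  # its values, while different parameters narrow it, e.g. a NID window via
+  # nid_start and nid_end. A small Go sketch follows; the host and the
+  # /hsm/v2/State/Components path are assumptions.
+  #
+  #   package main
+  #
+  #   import (
+  #       "fmt"
+  #       "net/url"
+  #   )
+  #
+  #   func main() {
+  #       q := url.Values{}
+  #       q.Add("type", "Node")   // repeat q.Add("type", ...) to match more types
+  #       q.Add("state", "Ready")
+  #       q.Add("role", "Compute")
+  #       q.Set("nid_start", "0") // nid_start <= NID <= nid_end
+  #       q.Set("nid_end", "24")
+  #       u := url.URL{
+  #           Scheme:   "https",
+  #           Host:     "hsm.example.com",
+  #           Path:     "/hsm/v2/State/Components",
+  #           RawQuery: q.Encode(),
+  #       }
+  #       fmt.Println(u.String())
+  #   }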
+ compNIDEndParam: + name: nid_end + in: query + type: string + description: >- + Filter the results based on NIDs less than or equal to the provided integer. + compPartitionParam: + name: partition + in: query + type: string + description: >- + Restrict search to the given partition (p#.#). One partition can be + combined with at most one group argument which will be treated + as a logical AND. NULL will return components in NO partition. + compGroupParam: + name: group + in: query + type: string + description: >- + Restrict search to the given group label. One group can be + combined with at most one partition argument which will be treated + as a logical AND. NULL will return components in NO groups. + + diff --git a/pkg/sls-client/.swagger-codegen-ignore b/pkg/sls-client/.swagger-codegen-ignore index 0055a2cc..0d77219b 100644 --- a/pkg/sls-client/.swagger-codegen-ignore +++ b/pkg/sls-client/.swagger-codegen-ignore @@ -22,7 +22,9 @@ # Then explicitly reverse the ignore rule for a single file: #!docs/README.md +api_dumpstate.go model_hardware.go model_hardware_class.go model_hardware_extra_properties.go -model_hardware_type_string.go \ No newline at end of file +model_hardware_extra_properties_cabinet.go +model_hardware_type_string.go diff --git a/pkg/sls-client/README.md b/pkg/sls-client/README.md index af3f4d45..7b793378 100644 --- a/pkg/sls-client/README.md +++ b/pkg/sls-client/README.md @@ -56,6 +56,7 @@ Class | Method | HTTP request | Description - [HardwareExtraPropertiesCabinet](docs/HardwareExtraPropertiesCabinet.md) - [HardwareExtraPropertiesCabinetNetworks](docs/HardwareExtraPropertiesCabinetNetworks.md) - [HardwareExtraPropertiesCduMgmtSwitch](docs/HardwareExtraPropertiesCduMgmtSwitch.md) + - [HardwareExtraPropertiesChassis](docs/HardwareExtraPropertiesChassis.md) - [HardwareExtraPropertiesChassisBmc](docs/HardwareExtraPropertiesChassisBmc.md) - [HardwareExtraPropertiesCompmod](docs/HardwareExtraPropertiesCompmod.md) - [HardwareExtraPropertiesCompmodPowerConnector](docs/HardwareExtraPropertiesCompmodPowerConnector.md) @@ -70,6 +71,7 @@ Class | Method | HTTP request | Description - [HardwareExtraPropertiesRtrBmc](docs/HardwareExtraPropertiesRtrBmc.md) - [HardwareExtraPropertiesRtrBmcNic](docs/HardwareExtraPropertiesRtrBmcNic.md) - [HardwareExtraPropertiesRtrmod](docs/HardwareExtraPropertiesRtrmod.md) + - [HardwareExtraPropertiesSystem](docs/HardwareExtraPropertiesSystem.md) - [HardwarePost](docs/HardwarePost.md) - [HardwarePut](docs/HardwarePut.md) - [HardwareType](docs/HardwareType.md) diff --git a/pkg/sls-client/api/swagger.yaml b/pkg/sls-client/api/swagger.yaml index 9b2b7c25..9137e782 100644 --- a/pkg/sls-client/api/swagger.yaml +++ b/pkg/sls-client/api/swagger.yaml @@ -1047,6 +1047,15 @@ components: - x0c0s0b0n0 LastUpdated: 0 TypeString: CDU + hardware_extra_properties_system: + type: object + properties: + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string + '@cani.id': + type: string hardware_extra_properties_bmc_nic: required: - IP4addr @@ -1055,6 +1064,12 @@ components: - Username type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string IP6addr: type: string description: "The ipv6 address that should be assigned to this BMC, or \"\ @@ -1081,6 +1096,12 @@ components: - Peers type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string Networks: type: array description: An array of network 
names that this NIC is connected to @@ -1097,11 +1118,23 @@ components: - PoweredBy type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string PoweredBy: $ref: '#/components/schemas/xname' hardware_extra_properties_cabinet: type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string Model: type: string Networks: @@ -1128,6 +1161,12 @@ components: hardware_extra_properties_cdu_mgmt_switch: type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string Brand: type: string Model: @@ -1136,9 +1175,24 @@ components: type: array items: type: string + hardware_extra_properties_chassis: + type: object + properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string hardware_extra_properties_chassis_bmc: type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string Aliases: type: array items: @@ -1148,6 +1202,12 @@ components: - PowerConnector type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string PowerConnector: type: array description: "An array of xnames, where each xname has type==*_pwr_connector.\ @@ -1159,11 +1219,23 @@ components: - PoweredBy type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string PoweredBy: $ref: '#/components/schemas/xname' hardware_extra_properties_mgmt_hl_switch: type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string IP6addr: type: string IP4addr: @@ -1191,6 +1263,12 @@ components: - NodeNics type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string NodeNics: type: array description: An array of xnames that this connector is connected to. 
All @@ -1200,6 +1278,12 @@ components: hardware_extra_properties_mgmt_switch: type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string IP6addr: type: string IP4addr: @@ -1227,6 +1311,12 @@ components: - NodeNics type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string NodeNics: type: array description: An array of xnames that the hardware_mgmt_switch_connector @@ -1241,6 +1331,12 @@ components: hardware_extra_properties_ncard: type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string IP6addr: type: string description: "The ipv6 address that should be assigned to this BMC, or \"\ @@ -1267,6 +1363,12 @@ components: - Role type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string NID: minimum: 0 type: integer @@ -1287,6 +1389,12 @@ components: - Peers type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string Networks: type: array description: An array of network names that this NIC is connected to @@ -1304,6 +1412,12 @@ components: - Peers type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string Networks: type: array description: An array of network names that this NIC is connected to @@ -1321,6 +1435,12 @@ components: - IP6addr type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string IP6addr: type: string description: "The ipv6 address that should be assigned to this BMC, or \"\ @@ -1347,6 +1467,12 @@ components: - Peers type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string Networks: type: array description: An array of network names that this NIC is connected to @@ -1363,6 +1489,12 @@ components: - PowerConnector type: object properties: + '@cani.id': + type: string + '@cani.lastModified': + type: string + '@cani.slsSchemaVersion': + type: string PowerConnector: type: array description: "An array of xnames, where each xname has type==*_pwr_connector.\ @@ -1390,6 +1522,7 @@ components: - $ref: '#/components/schemas/hardware_extra_properties_rtr_bmc' - $ref: '#/components/schemas/hardware_extra_properties_rtr_bmc_nic' - $ref: '#/components/schemas/hardware_extra_properties_rtrmod' + - $ref: '#/components/schemas/hardware_extra_properties_system' slsState: type: object properties: diff --git a/pkg/sls-client/api_dumpstate.go b/pkg/sls-client/api_dumpstate.go index 5d313e7e..c941b87f 100644 --- a/pkg/sls-client/api_dumpstate.go +++ b/pkg/sls-client/api_dumpstate.go @@ -82,6 +82,7 @@ func (a *DumpstateApiService) DumpstateGet(ctx context.Context) (SlsState, *http if err != nil { return localVarReturnValue, localVarHttpResponse, err } + // This code was manually added // start localVarHttpResponse.Body = io.NopCloser(bytes.NewReader(localVarBody)) diff --git a/pkg/sls-client/client.go b/pkg/sls-client/client.go index e8450291..b7862aae 100644 --- a/pkg/sls-client/client.go +++ b/pkg/sls-client/client.go @@ -41,6 +41,7 @@ var ( type APIClient struct { cfg *Configuration common service // Reuse a single struct instead of allocating one for each service on the heap. 
+ // API Services CliFromFileApi *CliFromFileApiService diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesBmcNic.md b/pkg/sls-client/docs/HardwareExtraPropertiesBmcNic.md index 8354a771..12d140ff 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesBmcNic.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesBmcNic.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **IP6addr** | **string** | The ipv6 address that should be assigned to this BMC, or \"DHCPv6\". If omitted, \"DHCPv6\" is assumed. | [default to null] **IP4addr** | **string** | The ipv4 address that should be assigned to this BMC, or \"DHCPv4\". If omitted, \"DHCPv4\" is assumed. | [default to null] **Username** | **string** | The username that should be used to access the device (or be assigned to the device) | [default to null] diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesCabPduNic.md b/pkg/sls-client/docs/HardwareExtraPropertiesCabPduNic.md index 96e0b460..675b75b6 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesCabPduNic.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesCabPduNic.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **Networks** | **[]string** | An array of network names that this NIC is connected to | [default to null] **Peers** | **[]string** | An array of xnames this NIC is connected directly to. 
These ideally connector xnames, not switches | [default to null] diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesCabPduPwrConnector.md b/pkg/sls-client/docs/HardwareExtraPropertiesCabPduPwrConnector.md index ac1de3fd..4ddd76c4 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesCabPduPwrConnector.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesCabPduPwrConnector.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **PoweredBy** | **string** | | [default to null] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesCabinet.md b/pkg/sls-client/docs/HardwareExtraPropertiesCabinet.md index a14114d6..7f7551f9 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesCabinet.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesCabinet.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **Model** | **string** | | [optional] [default to null] **Networks** | [**map[string]HardwareExtraPropertiesCabinetNetworks**](hardware_extra_properties_cabinet_networks.md) | | [optional] [default to null] **DHCPRelaySwitches** | **[]string** | | [optional] [default to null] diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesCabinetNetworksMap.md b/pkg/sls-client/docs/HardwareExtraPropertiesCabinetNetworksMap.md new file mode 100644 index 00000000..60ef0f3e --- /dev/null +++ b/pkg/sls-client/docs/HardwareExtraPropertiesCabinetNetworksMap.md @@ -0,0 +1,10 @@ +# HardwareExtraPropertiesCabinetNetworksMap + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**HMN** | [***HardwareExtraPropertiesCabinetNetworks**](hardware_extra_properties_cabinet_networks.md) | | [optional] [default to null] +**NMN** | [***HardwareExtraPropertiesCabinetNetworks**](hardware_extra_properties_cabinet_networks.md) | | [optional] [default to null] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesCabinetNetworksNcn.md b/pkg/sls-client/docs/HardwareExtraPropertiesCabinetNetworksNcn.md new file mode 100644 index 00000000..737e91ab --- /dev/null +++ b/pkg/sls-client/docs/HardwareExtraPropertiesCabinetNetworksNcn.md @@ -0,0 +1,10 @@ +# HardwareExtraPropertiesCabinetNetworksNcn + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**HMN** | [***HardwareExtraPropertiesCabinetNetworks**](hardware_extra_properties_cabinet_networks.md) | | [optional] [default to null] +**NMN** | [***HardwareExtraPropertiesCabinetNetworks**](hardware_extra_properties_cabinet_networks.md) | | [optional] [default to null] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to 
README]](../README.md) + diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesCduMgmtSwitch.md b/pkg/sls-client/docs/HardwareExtraPropertiesCduMgmtSwitch.md index 8289077d..7043598a 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesCduMgmtSwitch.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesCduMgmtSwitch.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **Brand** | **string** | | [optional] [default to null] **Model** | **string** | | [optional] [default to null] **Aliases** | **[]string** | | [optional] [default to null] diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesChassis.md b/pkg/sls-client/docs/HardwareExtraPropertiesChassis.md new file mode 100644 index 00000000..b9cff6c5 --- /dev/null +++ b/pkg/sls-client/docs/HardwareExtraPropertiesChassis.md @@ -0,0 +1,11 @@ +# HardwareExtraPropertiesChassis + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesChassisBmc.md b/pkg/sls-client/docs/HardwareExtraPropertiesChassisBmc.md index 644e0fa3..f545c50d 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesChassisBmc.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesChassisBmc.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **Aliases** | **[]string** | | [optional] [default to null] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesCompmod.md b/pkg/sls-client/docs/HardwareExtraPropertiesCompmod.md index 21a0569d..f2e1c176 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesCompmod.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesCompmod.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **PowerConnector** | **[]string** | An array of xnames, where each xname has type==*_pwr_connector. 
Empty for Mountain switch cards | [default to null] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesCompmodPowerConnector.md b/pkg/sls-client/docs/HardwareExtraPropertiesCompmodPowerConnector.md index ae76f48d..c566b351 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesCompmodPowerConnector.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesCompmodPowerConnector.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **PoweredBy** | **string** | | [default to null] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesHsnConnector.md b/pkg/sls-client/docs/HardwareExtraPropertiesHsnConnector.md index 1ce74610..b6669edb 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesHsnConnector.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesHsnConnector.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **NodeNics** | **[]string** | An array of xnames that this connector is connected to. 
All xnames should have type==comptype_hsn_connector_port | [default to null] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesMgmtHlSwitch.md b/pkg/sls-client/docs/HardwareExtraPropertiesMgmtHlSwitch.md index 7e4442ba..92ae41f8 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesMgmtHlSwitch.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesMgmtHlSwitch.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **IP6addr** | **string** | | [optional] [default to null] **IP4addr** | **string** | | [optional] [default to null] **Brand** | **string** | | [optional] [default to null] diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesMgmtSwitch.md b/pkg/sls-client/docs/HardwareExtraPropertiesMgmtSwitch.md index 0db40559..ae4d1c9c 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesMgmtSwitch.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesMgmtSwitch.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **IP6addr** | **string** | | [optional] [default to null] **IP4addr** | **string** | | [optional] [default to null] **Brand** | **string** | | [optional] [default to null] diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesMgmtSwitchConnector.md b/pkg/sls-client/docs/HardwareExtraPropertiesMgmtSwitchConnector.md index 36b266ed..5633fd7f 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesMgmtSwitchConnector.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesMgmtSwitchConnector.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **NodeNics** | **[]string** | An array of xnames that the hardware_mgmt_switch_connector is connected to. Excludes the parent. | [default to null] **VendorName** | **string** | The vendor-assigned name for this port, as it appears in the switch management software. Typically this is something like \"GigabitEthernet 1/31\" (Berkeley-style names), but may be any string. | [optional] [default to null] diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesNcard.md b/pkg/sls-client/docs/HardwareExtraPropertiesNcard.md index dc7aed9c..e5235c02 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesNcard.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesNcard.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **IP6addr** | **string** | The ipv6 address that should be assigned to this BMC, or \"DHCPv6\". 
If omitted, \"DHCPv6\" is assumed. | [optional] [default to null] **IP4addr** | **string** | The ipv4 address that should be assigned to this BMC, or \"DHCPv4\". If omitted, \"DHCPv4\" is assumed. | [optional] [default to null] **Username** | **string** | The username that should be used to access the device (or be assigned to the device) | [optional] [default to null] diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesNode.md b/pkg/sls-client/docs/HardwareExtraPropertiesNode.md index 9f6518e9..ebe7042a 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesNode.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesNode.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **NID** | **int32** | | [optional] [default to null] **Role** | **string** | | [default to null] **SubRole** | **string** | | [optional] [default to null] diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesNodeHsnNic.md b/pkg/sls-client/docs/HardwareExtraPropertiesNodeHsnNic.md index 58f07089..51c8649f 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesNodeHsnNic.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesNodeHsnNic.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **Networks** | **[]string** | An array of network names that this NIC is connected to | [default to null] **Peers** | **[]string** | An array of xnames this NIC is connected directly to. These ideally connector xnames, not switches | [default to null] diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesNodeNic.md b/pkg/sls-client/docs/HardwareExtraPropertiesNodeNic.md index 2f9b9364..3d1996c7 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesNodeNic.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesNodeNic.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **Networks** | **[]string** | An array of network names that this NIC is connected to | [default to null] **Peers** | **[]string** | An array of xnames this NIC is connected directly to. These ideally connector xnames, not switches | [default to null] diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesRtrBmc.md b/pkg/sls-client/docs/HardwareExtraPropertiesRtrBmc.md index 09718d27..eaf1e8f0 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesRtrBmc.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesRtrBmc.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **IP6addr** | **string** | The ipv6 address that should be assigned to this BMC, or \"DHCPv6\". If omitted, \"DHCPv6\" is assumed. 
| [default to null] **IP4addr** | **string** | The ipv4 address that should be assigned to this BMC, or \"DHCPv4\". If omitted, \"DHCPv4\" is assumed. | [default to null] **Username** | **string** | The username that should be used to access the device (or be assigned to the device) | [optional] [default to null] diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesRtrBmcNic.md b/pkg/sls-client/docs/HardwareExtraPropertiesRtrBmcNic.md index 95d63259..69407c38 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesRtrBmcNic.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesRtrBmcNic.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **Networks** | **[]string** | An array of network names that this NIC is connected to | [default to null] **Peers** | **[]string** | An array of xnames this NIC is connected directly to. These ideally connector xnames, not switches | [default to null] diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesRtrmod.md b/pkg/sls-client/docs/HardwareExtraPropertiesRtrmod.md index 1b8fa4de..b993ba80 100644 --- a/pkg/sls-client/docs/HardwareExtraPropertiesRtrmod.md +++ b/pkg/sls-client/docs/HardwareExtraPropertiesRtrmod.md @@ -3,6 +3,9 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**CaniId** | **string** | | [optional] [default to null] +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] **PowerConnector** | **[]string** | An array of xnames, where each xname has type==*_pwr_connector. 
Empty for Mountain switch cards | [default to null] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/pkg/sls-client/docs/HardwareExtraPropertiesSystem.md b/pkg/sls-client/docs/HardwareExtraPropertiesSystem.md new file mode 100644 index 00000000..0e082dd1 --- /dev/null +++ b/pkg/sls-client/docs/HardwareExtraPropertiesSystem.md @@ -0,0 +1,11 @@ +# HardwareExtraPropertiesSystem + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**CaniLastModified** | **string** | | [optional] [default to null] +**CaniSlsSchemaVersion** | **string** | | [optional] [default to null] +**CaniId** | **string** | | [optional] [default to null] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + diff --git a/pkg/sls-client/model_hardware_extra_properties.go b/pkg/sls-client/model_hardware_extra_properties.go index 06676dc2..6bf20c57 100644 --- a/pkg/sls-client/model_hardware_extra_properties.go +++ b/pkg/sls-client/model_hardware_extra_properties.go @@ -26,6 +26,8 @@ func (hardware *Hardware) DecodeExtraProperties() (result interface{}, err error result = HardwareExtraPropertiesCabPduNic{} case xnametypes.Cabinet: result = HardwareExtraPropertiesCabinet{} + case xnametypes.Chassis: + result = HardwareExtraPropertiesChassis{} case xnametypes.ChassisBMC: result = HardwareExtraPropertiesChassisBmc{} case xnametypes.ComputeModule: @@ -60,7 +62,7 @@ func (hardware *Hardware) DecodeExtraProperties() (result interface{}, err error return nil, nil } - return nil, fmt.Errorf("hardware object (%s) has unexpected properties", hardware.Xname) + return nil, fmt.Errorf("hardware object (%s) has unexpected properties of type (%T)", hardware.Xname, hardware.ExtraProperties) } // Decode the Raw extra properties into a give structure diff --git a/pkg/sls-client/model_hardware_extra_properties_bmc_nic.go b/pkg/sls-client/model_hardware_extra_properties_bmc_nic.go index a686644c..1e13e0b3 100644 --- a/pkg/sls-client/model_hardware_extra_properties_bmc_nic.go +++ b/pkg/sls-client/model_hardware_extra_properties_bmc_nic.go @@ -9,12 +9,15 @@ package sls_client type HardwareExtraPropertiesBmcNic struct { + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` // The ipv6 address that should be assigned to this BMC, or \"DHCPv6\". If omitted, \"DHCPv6\" is assumed. - IP6addr string `json:"IP6addr"` + IP6addr string `json:"IP6addr" mapstructure:"IP6addr"` // The ipv4 address that should be assigned to this BMC, or \"DHCPv4\". If omitted, \"DHCPv4\" is assumed. 
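Editor's note: the model_hardware_extra_properties.go hunk above adds a xnametypes.Chassis case to DecodeExtraProperties and enriches the fallthrough error with the payload's dynamic type (%T). A sketch of how a caller might consume that method after this change; the import path is inferred from this repository's layout:

```go
// Sketch only, assuming the generated sls_client package from this repo.
package sketch

import (
	"fmt"

	sls_client "github.com/Cray-HPE/cani/pkg/sls-client"
)

func describe(hw *sls_client.Hardware) error {
	ep, err := hw.DecodeExtraProperties()
	if err != nil {
		// The reworded error above now reports the dynamic type of the
		// unrecognized ExtraProperties payload, which makes this path
		// easier to debug.
		return err
	}
	switch props := ep.(type) {
	case sls_client.HardwareExtraPropertiesChassis:
		// Newly reachable via the xnametypes.Chassis case added above.
		fmt.Println("chassis cani id:", props.CaniId)
	case sls_client.HardwareExtraPropertiesCabinet:
		fmt.Println("cabinet model:", props.Model)
	case nil:
		// DecodeExtraProperties returns (nil, nil) when there are no
		// extra properties to decode.
		fmt.Println("no extra properties")
	default:
		fmt.Printf("other properties: %T\n", props)
	}
	return nil
}
```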
- IP4addr string `json:"IP4addr"` + IP4addr string `json:"IP4addr" mapstructure:"IP4addr"` // The username that should be used to access the device (or be assigned to the device) - Username string `json:"Username"` + Username string `json:"Username" mapstructure:"Username"` // The password that should be used to access the device - Password string `json:"Password"` + Password string `json:"Password" mapstructure:"Password"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_cab_pdu_nic.go b/pkg/sls-client/model_hardware_extra_properties_cab_pdu_nic.go index 8aafdaf5..90c69852 100644 --- a/pkg/sls-client/model_hardware_extra_properties_cab_pdu_nic.go +++ b/pkg/sls-client/model_hardware_extra_properties_cab_pdu_nic.go @@ -9,8 +9,11 @@ package sls_client type HardwareExtraPropertiesCabPduNic struct { + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` // An array of network names that this NIC is connected to - Networks []string `json:"Networks"` + Networks []string `json:"Networks" mapstructure:"Networks"` // An array of xnames this NIC is connected directly to. These ideally connector xnames, not switches - Peers []string `json:"Peers"` + Peers []string `json:"Peers" mapstructure:"Peers"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_cab_pdu_pwr_connector.go b/pkg/sls-client/model_hardware_extra_properties_cab_pdu_pwr_connector.go index 7aa6289f..0bb1e0c3 100644 --- a/pkg/sls-client/model_hardware_extra_properties_cab_pdu_pwr_connector.go +++ b/pkg/sls-client/model_hardware_extra_properties_cab_pdu_pwr_connector.go @@ -9,5 +9,8 @@ package sls_client type HardwareExtraPropertiesCabPduPwrConnector struct { - PoweredBy string `json:"PoweredBy"` + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` + PoweredBy string `json:"PoweredBy" mapstructure:"PoweredBy"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_cabinet.go b/pkg/sls-client/model_hardware_extra_properties_cabinet.go index a8614438..e72a7115 100644 --- a/pkg/sls-client/model_hardware_extra_properties_cabinet.go +++ b/pkg/sls-client/model_hardware_extra_properties_cabinet.go @@ -9,7 +9,11 @@ package sls_client type HardwareExtraPropertiesCabinet struct { - Model string `json:"Model,omitempty"` - Networks map[string]HardwareExtraPropertiesCabinetNetworks `json:"Networks,omitempty"` - DHCPRelaySwitches []string `json:"DHCPRelaySwitches,omitempty"` + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` + Model string `json:"Model,omitempty"` + // Networks is keyed first by hardware type, then by network name; the value is the network object (see the sketch just below).
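Editor's note: an illustrative sketch of walking the reshaped two-level Networks map on the cabinet model; "cn" and "HMN" are example keys taken from the generated docs elsewhere in this diff, not guaranteed contents:

```go
// Sketch only, assuming the generated sls_client package from this repo.
package sketch

import sls_client "github.com/Cray-HPE/cani/pkg/sls-client"

// cabinetVlan digs out the VLAN for one hardware-type/network pair,
// returning false if either level of the nested map is missing.
func cabinetVlan(cab sls_client.HardwareExtraPropertiesCabinet, hwType, network string) (int32, bool) {
	byNetwork, ok := cab.Networks[hwType]
	if !ok {
		return 0, false
	}
	net, ok := byNetwork[network]
	if !ok {
		return 0, false
	}
	return net.VLan, true
}
```

A caller would use it as, for example, cabinetVlan(cab, "cn", "HMN").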
+ Networks map[string]map[string]HardwareExtraPropertiesCabinetNetworks `json:"Networks,omitempty"` + DHCPRelaySwitches []string `json:"DHCPRelaySwitches,omitempty"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_cabinet_networks.go b/pkg/sls-client/model_hardware_extra_properties_cabinet_networks.go index b744d854..968fcb42 100644 --- a/pkg/sls-client/model_hardware_extra_properties_cabinet_networks.go +++ b/pkg/sls-client/model_hardware_extra_properties_cabinet_networks.go @@ -9,9 +9,9 @@ package sls_client type HardwareExtraPropertiesCabinetNetworks struct { - CIDR string `json:"CIDR,omitempty"` - Gateway string `json:"Gateway,omitempty"` - VLan int32 `json:"VLan,omitempty"` - IPv6Prefix string `json:"IPv6Prefix,omitempty"` - MACPrefix string `json:"MACPrefix,omitempty"` + CIDR string `json:"CIDR,omitempty" mapstructure:"CIDR"` + Gateway string `json:"Gateway,omitempty" mapstructure:"Gateway"` + VLan int32 `json:"VLan,omitempty" mapstructure:"VLan"` + IPv6Prefix string `json:"IPv6Prefix,omitempty" mapstructure:"IPv6Prefix"` + MACPrefix string `json:"MACPrefix,omitempty" mapstructure:"MACPrefix"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_cdu_mgmt_switch.go b/pkg/sls-client/model_hardware_extra_properties_cdu_mgmt_switch.go index 5a40da9d..01c50352 100644 --- a/pkg/sls-client/model_hardware_extra_properties_cdu_mgmt_switch.go +++ b/pkg/sls-client/model_hardware_extra_properties_cdu_mgmt_switch.go @@ -9,7 +9,10 @@ package sls_client type HardwareExtraPropertiesCduMgmtSwitch struct { - Brand string `json:"Brand,omitempty"` - Model string `json:"Model,omitempty"` - Aliases []string `json:"Aliases,omitempty"` + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` + Brand string `json:"Brand,omitempty" mapstructure:"Brand"` + Model string `json:"Model,omitempty" mapstructure:"Model"` + Aliases []string `json:"Aliases,omitempty" mapstructure:"Aliases"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_hl_switch.go b/pkg/sls-client/model_hardware_extra_properties_chassis.go similarity index 87% rename from pkg/sls-client/model_hardware_extra_properties_hl_switch.go rename to pkg/sls-client/model_hardware_extra_properties_chassis.go index 93f02f91..5a529450 100644 --- a/pkg/sls-client/model_hardware_extra_properties_hl_switch.go +++ b/pkg/sls-client/model_hardware_extra_properties_chassis.go @@ -8,15 +8,8 @@ */ package sls_client -type HardwareExtraPropertiesHlSwitch struct { - IP6addr string `json:"IP6addr,omitempty"` - IP4addr string `json:"IP4addr,omitempty"` - Brand string `json:"Brand,omitempty"` - Model string `json:"Model,omitempty"` - SNMPAuthPassword string `json:"SNMPAuthPassword,omitempty"` - SNMPAuthProtocol string `json:"SNMPAuthProtocol,omitempty"` - SNMPPrivPassword string `json:"SNMPPrivPassword,omitempty"` - SNMPPrivProtocol string `json:"SNMPPrivProtocol,omitempty"` - SNMPUsername string `json:"SNMPUsername,omitempty"` - Aliases []string `json:"Aliases,omitempty"` +type HardwareExtraPropertiesChassis struct { + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` } diff --git 
a/pkg/sls-client/model_hardware_extra_properties_chassis_bmc.go b/pkg/sls-client/model_hardware_extra_properties_chassis_bmc.go index 458716e0..ee316bcc 100644 --- a/pkg/sls-client/model_hardware_extra_properties_chassis_bmc.go +++ b/pkg/sls-client/model_hardware_extra_properties_chassis_bmc.go @@ -9,5 +9,8 @@ package sls_client type HardwareExtraPropertiesChassisBmc struct { - Aliases []string `json:"Aliases,omitempty"` + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` + Aliases []string `json:"Aliases,omitempty" mapstructure:"Aliases"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_compmod.go b/pkg/sls-client/model_hardware_extra_properties_compmod.go index 07e3734a..823f13dc 100644 --- a/pkg/sls-client/model_hardware_extra_properties_compmod.go +++ b/pkg/sls-client/model_hardware_extra_properties_compmod.go @@ -9,6 +9,9 @@ package sls_client type HardwareExtraPropertiesCompmod struct { + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` // An array of xnames, where each xname has type==*_pwr_connector. Empty for Mountain switch cards - PowerConnector []string `json:"PowerConnector"` + PowerConnector []string `json:"PowerConnector" mapstructure:"PowerConnector"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_compmod_power_connector.go b/pkg/sls-client/model_hardware_extra_properties_compmod_power_connector.go index 2477bc40..2216aabd 100644 --- a/pkg/sls-client/model_hardware_extra_properties_compmod_power_connector.go +++ b/pkg/sls-client/model_hardware_extra_properties_compmod_power_connector.go @@ -9,5 +9,8 @@ package sls_client type HardwareExtraPropertiesCompmodPowerConnector struct { - PoweredBy string `json:"PoweredBy"` + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` + PoweredBy string `json:"PoweredBy" mapstructure:"PoweredBy"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_hsn_connector.go b/pkg/sls-client/model_hardware_extra_properties_hsn_connector.go index 2f5dcef3..463b1351 100644 --- a/pkg/sls-client/model_hardware_extra_properties_hsn_connector.go +++ b/pkg/sls-client/model_hardware_extra_properties_hsn_connector.go @@ -9,6 +9,9 @@ package sls_client type HardwareExtraPropertiesHsnConnector struct { + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` // An array of xnames that this connector is connected to. 
All xnames should have type==comptype_hsn_connector_port - NodeNics []string `json:"NodeNics"` + NodeNics []string `json:"NodeNics" mapstructure:"NodeNics"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_mgmt_hl_switch.go b/pkg/sls-client/model_hardware_extra_properties_mgmt_hl_switch.go index be4aae11..bf225c40 100644 --- a/pkg/sls-client/model_hardware_extra_properties_mgmt_hl_switch.go +++ b/pkg/sls-client/model_hardware_extra_properties_mgmt_hl_switch.go @@ -9,14 +9,17 @@ package sls_client type HardwareExtraPropertiesMgmtHlSwitch struct { - IP6addr string `json:"IP6addr,omitempty"` - IP4addr string `json:"IP4addr,omitempty"` - Brand string `json:"Brand,omitempty"` - Model string `json:"Model,omitempty"` - SNMPAuthPassword string `json:"SNMPAuthPassword,omitempty"` - SNMPAuthProtocol string `json:"SNMPAuthProtocol,omitempty"` - SNMPPrivPassword string `json:"SNMPPrivPassword,omitempty"` - SNMPPrivProtocol string `json:"SNMPPrivProtocol,omitempty"` - SNMPUsername string `json:"SNMPUsername,omitempty"` - Aliases []string `json:"Aliases,omitempty"` + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` + IP6addr string `json:"IP6addr,omitempty" mapstructure:"IP6addr"` + IP4addr string `json:"IP4addr,omitempty" mapstructure:"IP4addr"` + Brand string `json:"Brand,omitempty" mapstructure:"Brand"` + Model string `json:"Model,omitempty" mapstructure:"Model"` + SNMPAuthPassword string `json:"SNMPAuthPassword,omitempty" mapstructure:"SNMPAuthPassword"` + SNMPAuthProtocol string `json:"SNMPAuthProtocol,omitempty" mapstructure:"SNMPAuthProtocol"` + SNMPPrivPassword string `json:"SNMPPrivPassword,omitempty" mapstructure:"SNMPPrivPassword"` + SNMPPrivProtocol string `json:"SNMPPrivProtocol,omitempty" mapstructure:"SNMPPrivProtocol"` + SNMPUsername string `json:"SNMPUsername,omitempty" mapstructure:"SNMPUsername"` + Aliases []string `json:"Aliases,omitempty" mapstructure:"Aliases"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_mgmt_switch.go b/pkg/sls-client/model_hardware_extra_properties_mgmt_switch.go index 3fda1d79..6821c951 100644 --- a/pkg/sls-client/model_hardware_extra_properties_mgmt_switch.go +++ b/pkg/sls-client/model_hardware_extra_properties_mgmt_switch.go @@ -9,14 +9,17 @@ package sls_client type HardwareExtraPropertiesMgmtSwitch struct { - IP6addr string `json:"IP6addr,omitempty"` - IP4addr string `json:"IP4addr,omitempty"` - Brand string `json:"Brand,omitempty"` - Model string `json:"Model,omitempty"` - SNMPAuthPassword string `json:"SNMPAuthPassword,omitempty"` - SNMPAuthProtocol string `json:"SNMPAuthProtocol,omitempty"` - SNMPPrivPassword string `json:"SNMPPrivPassword,omitempty"` - SNMPPrivProtocol string `json:"SNMPPrivProtocol,omitempty"` - SNMPUsername string `json:"SNMPUsername,omitempty"` - Aliases []string `json:"Aliases,omitempty"` + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` + IP6addr string `json:"IP6addr,omitempty" mapstructure:"IP6addr"` + IP4addr string `json:"IP4addr,omitempty" mapstructure:"IP4addr"` + Brand string `json:"Brand,omitempty" mapstructure:"Brand"` + Model string 
`json:"Model,omitempty" mapstructure:"Model"` + SNMPAuthPassword string `json:"SNMPAuthPassword,omitempty" mapstructure:"SNMPAuthPassword"` + SNMPAuthProtocol string `json:"SNMPAuthProtocol,omitempty" mapstructure:"SNMPAuthProtocol"` + SNMPPrivPassword string `json:"SNMPPrivPassword,omitempty" mapstructure:"SNMPPrivPassword"` + SNMPPrivProtocol string `json:"SNMPPrivProtocol,omitempty" mapstructure:"SNMPPrivProtocol"` + SNMPUsername string `json:"SNMPUsername,omitempty" mapstructure:"SNMPUsername"` + Aliases []string `json:"Aliases,omitempty" mapstructure:"Aliases"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_mgmt_switch_connector.go b/pkg/sls-client/model_hardware_extra_properties_mgmt_switch_connector.go index 3c1eff5d..ed292d76 100644 --- a/pkg/sls-client/model_hardware_extra_properties_mgmt_switch_connector.go +++ b/pkg/sls-client/model_hardware_extra_properties_mgmt_switch_connector.go @@ -9,8 +9,11 @@ package sls_client type HardwareExtraPropertiesMgmtSwitchConnector struct { + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` // An array of xnames that the hardware_mgmt_switch_connector is connected to. Excludes the parent. - NodeNics []string `json:"NodeNics"` + NodeNics []string `json:"NodeNics" mapstructure:"NodeNics"` // The vendor-assigned name for this port, as it appears in the switch management software. Typically this is something like \"GigabitEthernet 1/31\" (Berkeley-style names), but may be any string. - VendorName string `json:"VendorName,omitempty"` + VendorName string `json:"VendorName,omitempty" mapstructure:"VendorName"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_ncard.go b/pkg/sls-client/model_hardware_extra_properties_ncard.go index d42c9312..b0df5baf 100644 --- a/pkg/sls-client/model_hardware_extra_properties_ncard.go +++ b/pkg/sls-client/model_hardware_extra_properties_ncard.go @@ -9,12 +9,15 @@ package sls_client type HardwareExtraPropertiesNcard struct { + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` // The ipv6 address that should be assigned to this BMC, or \"DHCPv6\". If omitted, \"DHCPv6\" is assumed. - IP6addr string `json:"IP6addr,omitempty"` + IP6addr string `json:"IP6addr,omitempty" mapstructure:"IP6addr"` // The ipv4 address that should be assigned to this BMC, or \"DHCPv4\". If omitted, \"DHCPv4\" is assumed. 
- IP4addr string `json:"IP4addr,omitempty"` + IP4addr string `json:"IP4addr,omitempty" mapstructure:"IP4addr"` // The username that should be used to access the device (or be assigned to the device) - Username string `json:"Username,omitempty"` + Username string `json:"Username,omitempty" mapstructure:"Username"` // The password that should be used to access the device (or be assigned to the device) - Password string `json:"Password,omitempty"` + Password string `json:"Password,omitempty" mapstructure:"Password"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_node.go b/pkg/sls-client/model_hardware_extra_properties_node.go index 5e51f46b..6fa36307 100644 --- a/pkg/sls-client/model_hardware_extra_properties_node.go +++ b/pkg/sls-client/model_hardware_extra_properties_node.go @@ -9,8 +9,11 @@ package sls_client type HardwareExtraPropertiesNode struct { - NID int32 `json:"NID,omitempty"` - Role string `json:"Role"` - SubRole string `json:"SubRole,omitempty"` - Aliases []string `json:"Aliases"` + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` + NID int32 `json:"NID,omitempty" mapstructure:"NID"` + Role string `json:"Role" mapstructure:"Role"` + SubRole string `json:"SubRole,omitempty" mapstructure:"SubRole"` + Aliases []string `json:"Aliases" mapstructure:"Aliases"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_node_hsn_nic.go b/pkg/sls-client/model_hardware_extra_properties_node_hsn_nic.go index 426ce14d..442fd673 100644 --- a/pkg/sls-client/model_hardware_extra_properties_node_hsn_nic.go +++ b/pkg/sls-client/model_hardware_extra_properties_node_hsn_nic.go @@ -9,8 +9,11 @@ package sls_client type HardwareExtraPropertiesNodeHsnNic struct { + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` // An array of network names that this NIC is connected to - Networks []string `json:"Networks"` + Networks []string `json:"Networks" mapstructure:"Networks"` // An array of xnames this NIC is connected directly to. These ideally connector xnames, not switches - Peers []string `json:"Peers"` + Peers []string `json:"Peers" mapstructure:"Peers"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_node_nic.go b/pkg/sls-client/model_hardware_extra_properties_node_nic.go index 2a9f7471..fb06a464 100644 --- a/pkg/sls-client/model_hardware_extra_properties_node_nic.go +++ b/pkg/sls-client/model_hardware_extra_properties_node_nic.go @@ -9,8 +9,11 @@ package sls_client type HardwareExtraPropertiesNodeNic struct { + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` // An array of network names that this NIC is connected to - Networks []string `json:"Networks"` + Networks []string `json:"Networks" mapstructure:"Networks"` // An array of xnames this NIC is connected directly to. 
These ideally connector xnames, not switches - Peers []string `json:"Peers"` + Peers []string `json:"Peers" mapstructure:"Peers"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_rtr_bmc.go b/pkg/sls-client/model_hardware_extra_properties_rtr_bmc.go index 57c166ff..1a386da8 100644 --- a/pkg/sls-client/model_hardware_extra_properties_rtr_bmc.go +++ b/pkg/sls-client/model_hardware_extra_properties_rtr_bmc.go @@ -9,12 +9,15 @@ package sls_client type HardwareExtraPropertiesRtrBmc struct { + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` // The ipv6 address that should be assigned to this BMC, or \"DHCPv6\". If omitted, \"DHCPv6\" is assumed. - IP6addr string `json:"IP6addr"` + IP6addr string `json:"IP6addr" mapstructure:"IP6addr"` // The ipv4 address that should be assigned to this BMC, or \"DHCPv4\". If omitted, \"DHCPv4\" is assumed. - IP4addr string `json:"IP4addr"` + IP4addr string `json:"IP4addr" mapstructure:"IP4addr"` // The username that should be used to access the device (or be assigned to the device) - Username string `json:"Username,omitempty"` + Username string `json:"Username,omitempty" mapstructure:"Username"` // The password that should be used to access the device (or be assigned to the device) - Password string `json:"Password,omitempty"` + Password string `json:"Password,omitempty" mapstructure:"Password"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_rtr_bmc_nic.go b/pkg/sls-client/model_hardware_extra_properties_rtr_bmc_nic.go index af7a0e80..117d6297 100644 --- a/pkg/sls-client/model_hardware_extra_properties_rtr_bmc_nic.go +++ b/pkg/sls-client/model_hardware_extra_properties_rtr_bmc_nic.go @@ -9,8 +9,11 @@ package sls_client type HardwareExtraPropertiesRtrBmcNic struct { + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` // An array of network names that this NIC is connected to - Networks []string `json:"Networks"` + Networks []string `json:"Networks" mapstructure:"Networks"` // An array of xnames this NIC is connected directly to. These ideally connector xnames, not switches - Peers []string `json:"Peers"` + Peers []string `json:"Peers" mapstructure:"Peers"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_rtrmod.go b/pkg/sls-client/model_hardware_extra_properties_rtrmod.go index 178ada0f..72cb9f7d 100644 --- a/pkg/sls-client/model_hardware_extra_properties_rtrmod.go +++ b/pkg/sls-client/model_hardware_extra_properties_rtrmod.go @@ -9,6 +9,9 @@ package sls_client type HardwareExtraPropertiesRtrmod struct { + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` // An array of xnames, where each xname has type==*_pwr_connector. 
Empty for Mountain switch cards - PowerConnector []string `json:"PowerConnector"` + PowerConnector []string `json:"PowerConnector" mapstructure:"PowerConnector"` } diff --git a/pkg/sls-client/model_hardware_extra_properties_system.go b/pkg/sls-client/model_hardware_extra_properties_system.go new file mode 100644 index 00000000..36cc3900 --- /dev/null +++ b/pkg/sls-client/model_hardware_extra_properties_system.go @@ -0,0 +1,15 @@ +/* + * System Layout Service + * + * System Layout Service (SLS) holds information on the complete, designed system. SLS gets this information from an input file on the system. Besides information like what hardware should be present in a system, SLS also stores information about what network connections exist and what power connections exist. SLS details the physical locations of network hardware, compute nodes and cabinets. Further, it stores information about the network, such as which port on which switch should be connected to each compute node. The API allows updating this information as well. Note that SLS is not responsible for verifying that the system is set up correctly. It only lets the Shasta system know what the system should be configured with. SLS does not store the details of the actual hardware like hardware identifiers. Instead it stores a generalized abstraction of the system that other services may use. SLS thus does not need to change as hardware within the system is replaced. Interaction with SLS is required if the system setup changes – for example, if system cabling is altered or during installation, expansion, or reduction. SLS does not interact with the hardware. Each object in SLS has the following basic properties: * Parent – Each object in SLS has a parent object except the system root (s0). * Children – Objects may have children. * xname – Every object has an xname – a unique identifier for that object. * Type – a hardware type like \"comptype_ncard\", \"comptype_cabinet\". * Class – kind of hardware like \"River\" or \"Mountain\" * TypeString – a human readable type like \"Cabinet\" Some objects may have additional properties depending on their type. For example, additional properties for cabinets include \"Network\", \"IP6Prefix\", \"IP4Base\", \"MACprefix\" etc. ## Resources ### /hardware Create hardware entries in SLS. This resource can be used when you add new components or expand your system. Interaction with this resource is not required if a component is removed or replaced. ### /hardware/{xname} Retrieve, update, or delete information about specific xnames. ### /search/hardware Uses HTTP query parameters to find hardware entries with matching properties. Returns a JSON list of xnames. If multiple query parameters are passed, any returned hardware must match all parameters. For example, a query string of \"?parent=x0\" would return a list of all children of cabinet x0. A query string of \"?type=comptype_node\" would return a list of all compute nodes. Valid query parameters are: xname, parent, class, type, power_connector, node_nics, networks, peers. ### /search/networks Uses HTTP query parameters to find network entries with matching properties. ### /networks Create new network objects or retrieve networks available in the system. ### /networks/{network} Retrieve, update, or delete information about specific networks. ### /dumpstate Dumps the current database state of the service. This may be useful when you are backing up the system or planning a reinstall of the system. 
### /loadstate Upload and overwrite the current database with the contents of the posted data. The posted data should be a state dump from /dumpstate. This may be useful to restore the SLS database after you have reinstalled the system. ## Workflows ### Backup and Restore the SLS Database for Reinstallation #### GET /dumpstate Perform a dump of the current state of the SLS data. This should be done before reinstalling the system. The database dump is a JSON blob in an SLS-specific format. #### POST /loadstate Reimport the dump from /dumpstate and restore the SLS database after reinstall. ### Expand System #### POST /hardware Add the new hardware objects. #### GET /hardware/{xname} Review hardware properties of the xname from the JSON array. ### Remove Hardware #### DELETE /hardware Remove hardware from SLS ### Modify Hardware Properties #### PATCH /hardware Modify hardware properties in SLS. Only additional properties can be modified. Basic properties like xname, parent, children, type, class, typestring cannot be modified. + * + * API version: 0.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package sls_client + +type HardwareExtraPropertiesSystem struct { + CaniLastModified string `json:"@cani.lastModified,omitempty" mapstructure:"@cani.lastModified"` + CaniSlsSchemaVersion string `json:"@cani.slsSchemaVersion,omitempty" mapstructure:"@cani.slsSchemaVersion"` + CaniId string `json:"@cani.id,omitempty" mapstructure:"@cani.id"` +} diff --git a/pkg/sls-client/model_hardware_post.go b/pkg/sls-client/model_hardware_post.go index 0c25f121..b7bad282 100644 --- a/pkg/sls-client/model_hardware_post.go +++ b/pkg/sls-client/model_hardware_post.go @@ -9,7 +9,7 @@ package sls_client type HardwarePost struct { - Xname string `json:"Xname"` - Class *HardwareClass `json:"Class"` - ExtraProperties *HardwareExtraProperties `json:"ExtraProperties,omitempty"` + Xname string `json:"Xname" mapstructure:"Xname"` + Class *HardwareClass `json:"Class" mapstructure:"Class"` + ExtraProperties *HardwareExtraProperties `json:"ExtraProperties,omitempty" mapstructure:"ExtraProperties"` } diff --git a/pkg/sls-client/model_hardware_put.go b/pkg/sls-client/model_hardware_put.go index 372a0ef7..c31c0252 100644 --- a/pkg/sls-client/model_hardware_put.go +++ b/pkg/sls-client/model_hardware_put.go @@ -9,6 +9,6 @@ package sls_client type HardwarePut struct { - Class *HardwareClass `json:"Class"` - ExtraProperties *HardwareExtraProperties `json:"ExtraProperties,omitempty"` + Class *HardwareClass `json:"Class" mapstructure:"Class"` + ExtraProperties *HardwareExtraProperties `json:"ExtraProperties,omitempty" mapstructure:"ExtraProperties"` } diff --git a/pkg/sls-client/model_inline_response_200.go b/pkg/sls-client/model_inline_response_200.go index fe92732d..1d9d76bb 100644 --- a/pkg/sls-client/model_inline_response_200.go +++ b/pkg/sls-client/model_inline_response_200.go @@ -10,7 +10,7 @@ package sls_client type InlineResponse200 struct { // Status of the Vault. - Vault string `json:"Vault"` + Vault string `json:"Vault" mapstructure:"Vault"` // Status of the connection with the database. 
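Editor's note: every Cani* field in the new HardwareExtraPropertiesSystem model above is tagged with a JSON key in the "@cani.*" namespace plus omitempty, so unset annotations disappear from serialized SLS payloads. A small runnable sketch (field values are illustrative; the import path is inferred from this repository's layout):

```go
package main

import (
	"encoding/json"
	"fmt"

	sls_client "github.com/Cray-HPE/cani/pkg/sls-client"
)

func main() {
	props := sls_client.HardwareExtraPropertiesSystem{
		CaniId:               "11111111-2222-3333-4444-555555555555", // illustrative
		CaniSlsSchemaVersion: "v1alpha1",                             // illustrative
	}
	out, err := json.Marshal(props)
	if err != nil {
		panic(err)
	}
	// CaniLastModified is empty and tagged omitempty, so it is dropped:
	// {"@cani.slsSchemaVersion":"v1alpha1","@cani.id":"11111111-..."}
	fmt.Println(string(out))
}
```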
- DBConnection string `json:"DBConnection"` + DBConnection string `json:"DBConnection" mapstructure:"DBConnection"` } diff --git a/pkg/sls-client/model_loadstate_body.go b/pkg/sls-client/model_loadstate_body.go index afaa07b4..12966695 100644 --- a/pkg/sls-client/model_loadstate_body.go +++ b/pkg/sls-client/model_loadstate_body.go @@ -9,5 +9,5 @@ package sls_client type LoadstateBody struct { - SlsDump *SlsState `json:"sls_dump,omitempty"` + SlsDump *SlsState `json:"sls_dump,omitempty" mapstructure:"sls_dump"` } diff --git a/pkg/sls-client/model_network.go b/pkg/sls-client/model_network.go index 71e26de2..150b3a98 100644 --- a/pkg/sls-client/model_network.go +++ b/pkg/sls-client/model_network.go @@ -9,11 +9,11 @@ package sls_client type Network struct { - Name string `json:"Name"` - FullName string `json:"FullName,omitempty"` - IPRanges []string `json:"IPRanges"` - Type_ string `json:"Type"` - LastUpdated int32 `json:"LastUpdated,omitempty"` - LastUpdatedTime string `json:"LastUpdatedTime,omitempty"` - ExtraProperties *NetworkExtraProperties `json:"ExtraProperties,omitempty"` + Name string `json:"Name" mapstructure:"Name"` + FullName string `json:"FullName,omitempty" mapstructure:"FullName"` + IPRanges []string `json:"IPRanges" mapstructure:"IPRanges"` + Type_ string `json:"Type" mapstructure:"Type"` + LastUpdated int32 `json:"LastUpdated,omitempty" mapstructure:"LastUpdated"` + LastUpdatedTime string `json:"LastUpdatedTime,omitempty" mapstructure:"LastUpdatedTime"` + ExtraProperties *NetworkExtraProperties `json:"ExtraProperties,omitempty" mapstructure:"ExtraProperties"` } diff --git a/pkg/sls-client/model_network_extra_properties.go b/pkg/sls-client/model_network_extra_properties.go index 152f4804..97d17d7d 100644 --- a/pkg/sls-client/model_network_extra_properties.go +++ b/pkg/sls-client/model_network_extra_properties.go @@ -9,9 +9,9 @@ package sls_client type NetworkExtraProperties struct { - CIDR string `json:"CIDR,omitempty"` - VlanRange []int32 `json:"VlanRange,omitempty"` - MTU int32 `json:"MTU,omitempty"` - Subnets []NetworkIpv4Subnet `json:"Subnets,omitempty"` - Comment string `json:"Comment,omitempty"` + CIDR string `json:"CIDR,omitempty" mapstructure:"CIDR"` + VlanRange []int32 `json:"VlanRange,omitempty" mapstructure:"VlanRange"` + MTU int32 `json:"MTU,omitempty" mapstructure:"MTU"` + Subnets []NetworkIpv4Subnet `json:"Subnets,omitempty" mapstructure:"Subnets"` + Comment string `json:"Comment,omitempty" mapstructure:"Comment"` } diff --git a/pkg/sls-client/model_network_ip_reservation.go b/pkg/sls-client/model_network_ip_reservation.go index 35eac06a..eed70198 100644 --- a/pkg/sls-client/model_network_ip_reservation.go +++ b/pkg/sls-client/model_network_ip_reservation.go @@ -9,8 +9,8 @@ package sls_client type NetworkIpReservation struct { - IPAddress string `json:"IPAddress"` - Name string `json:"Name"` - Aliases []string `json:"Aliases,omitempty"` - Comment string `json:"Comment,omitempty"` + IPAddress string `json:"IPAddress" mapstructure:"IPAddress"` + Name string `json:"Name" mapstructure:"Name"` + Aliases []string `json:"Aliases,omitempty" mapstructure:"Aliases"` + Comment string `json:"Comment,omitempty" mapstructure:"Comment"` } diff --git a/pkg/sls-client/model_network_ipv4_subnet.go b/pkg/sls-client/model_network_ipv4_subnet.go index 7115cc97..05835e68 100644 --- a/pkg/sls-client/model_network_ipv4_subnet.go +++ b/pkg/sls-client/model_network_ipv4_subnet.go @@ -9,13 +9,13 @@ package sls_client type NetworkIpv4Subnet struct { - Name string `json:"Name"` - FullName 
string `json:"FullName,omitempty"` - CIDR string `json:"CIDR"` - VlanID int32 `json:"VlanID"` - Gateway string `json:"Gateway,omitempty"` - DHCPStart string `json:"DHCPStart,omitempty"` - DHCPEnd string `json:"DHCPEnd,omitempty"` - IPReservations []NetworkIpReservation `json:"IPReservations,omitempty"` - Comment string `json:"Comment,omitempty"` + Name string `json:"Name" mapstructure:"Name"` + FullName string `json:"FullName,omitempty" mapstructure:"FullName"` + CIDR string `json:"CIDR" mapstructure:"CIDR"` + VlanID int32 `json:"VlanID" mapstructure:"VlanID"` + Gateway string `json:"Gateway,omitempty" mapstructure:"Gateway"` + DHCPStart string `json:"DHCPStart,omitempty" mapstructure:"DHCPStart"` + DHCPEnd string `json:"DHCPEnd,omitempty" mapstructure:"DHCPEnd"` + IPReservations []NetworkIpReservation `json:"IPReservations,omitempty" mapstructure:"IPReservations"` + Comment string `json:"Comment,omitempty" mapstructure:"Comment"` } diff --git a/pkg/sls-client/model_problem7807.go b/pkg/sls-client/model_problem7807.go index 74ead266..d7a25d02 100644 --- a/pkg/sls-client/model_problem7807.go +++ b/pkg/sls-client/model_problem7807.go @@ -10,9 +10,9 @@ package sls_client // RFC 7807 compliant error payload. All fields are optional except the 'type' field. type Problem7807 struct { - Type_ string `json:"type"` - Detail string `json:"detail,omitempty"` - Instance string `json:"instance,omitempty"` - Status float64 `json:"status,omitempty"` - Title string `json:"title,omitempty"` + Type_ string `json:"type" mapstructure:"type"` + Detail string `json:"detail,omitempty" mapstructure:"detail"` + Instance string `json:"instance,omitempty" mapstructure:"instance"` + Status float64 `json:"status,omitempty" mapstructure:"status"` + Title string `json:"title,omitempty" mapstructure:"title"` } diff --git a/pkg/sls-client/model_sls_state.go b/pkg/sls-client/model_sls_state.go index a57d57a3..3e21cdba 100644 --- a/pkg/sls-client/model_sls_state.go +++ b/pkg/sls-client/model_sls_state.go @@ -9,6 +9,6 @@ package sls_client type SlsState struct { - Hardware map[string]Hardware `json:"Hardware,omitempty"` - Networks map[string]Network `json:"Networks,omitempty"` + Hardware map[string]Hardware `json:"Hardware,omitempty" mapstructure:"Hardware"` + Networks map[string]Network `json:"Networks,omitempty" mapstructure:"Networks"` } diff --git a/pkg/sls-client/model_version_response.go b/pkg/sls-client/model_version_response.go index 77c0f73f..23d6ecee 100644 --- a/pkg/sls-client/model_version_response.go +++ b/pkg/sls-client/model_version_response.go @@ -14,7 +14,7 @@ import ( type VersionResponse struct { // A monotonically increasing counter that increases every time a change is made to SLS - Counter int32 `json:"counter,omitempty"` + Counter int32 `json:"counter,omitempty" mapstructure:"counter"` // An ISO-8601 datetime representing when a change was last made to SLS - LastUpdated time.Time `json:"last_updated,omitempty"` + LastUpdated time.Time `json:"last_updated,omitempty" mapstructure:"last_updated"` } diff --git a/pkg/sls-client/openapi.yaml b/pkg/sls-client/openapi.yaml index 2a617d84..e3559eff 100644 --- a/pkg/sls-client/openapi.yaml +++ b/pkg/sls-client/openapi.yaml @@ -931,10 +931,26 @@ components: $ref: '#/components/schemas/hardware_extra_properties' + hardware_extra_properties_system: + type: object + # Required + properties: + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string + "@cani.id": + type: string hardware_extra_properties_bmc_nic: type: object required: 
["IP6addr", "IP4addr", "Username", "Password"] properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string IP6addr: type: string description: "The ipv6 address that should be assigned to this BMC, or \"DHCPv6\". If omitted, \"DHCPv6\" is assumed." @@ -959,6 +975,12 @@ components: type: object required: ["Networks", "Peers"] properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string Networks: type: array items: @@ -973,12 +995,24 @@ components: type: object required: ["PoweredBy"] properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string PoweredBy: $ref: '#/components/schemas/xname' description: "The hardware this cable is connected to. May be any type of object. Parent is excluded" hardware_extra_properties_cabinet: type: object properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string Model: type: string Networks: @@ -1015,6 +1049,12 @@ components: hardware_extra_properties_cdu_mgmt_switch: type: object properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string Brand: type: string Model: @@ -1024,10 +1064,24 @@ components: items: type: string # hardware_extra_properties_cec: {} - # hardware_extra_properties_chassis: {} + hardware_extra_properties_chassis: + type: object + properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string hardware_extra_properties_chassis_bmc: type: object properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string Aliases: type: array items: @@ -1038,6 +1092,12 @@ components: type: object required: ["PowerConnector"] properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string PowerConnector: type: array items: @@ -1047,6 +1107,12 @@ components: type: object required: ["PoweredBy"] properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string PoweredBy: $ref: '#/components/schemas/xname' description: "The hardware this cable is connected to. May be any type of object. 
Parent is excluded" @@ -1054,6 +1120,12 @@ components: hardware_extra_properties_mgmt_hl_switch: type: object properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string IP6addr: type: string IP4addr: @@ -1083,6 +1155,12 @@ components: type: object required: ["NodeNics"] properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string NodeNics: type: array items: @@ -1093,6 +1171,12 @@ components: hardware_extra_properties_mgmt_switch: type: object properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string IP6addr: type: string IP4addr: @@ -1119,6 +1203,12 @@ components: type: object required: ["NodeNics"] properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string NodeNics: type: array items: @@ -1131,6 +1221,12 @@ components: hardware_extra_properties_ncard: # NODEBMC type: object properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string IP6addr: type: string description: "The ipv6 address that should be assigned to this BMC, or \"DHCPv6\". If omitted, \"DHCPv6\" is assumed." @@ -1151,6 +1247,12 @@ components: type: object required: ["Role", "Aliases"] properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string NID: type: integer minimum: 0 @@ -1172,6 +1274,12 @@ components: type: object required: ["Networks", "Peers"] properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string Networks: type: array items: @@ -1186,6 +1294,12 @@ components: type: object required: ["Networks", "Peers"] properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string Networks: type: array items: @@ -1201,6 +1315,12 @@ components: type: object required: ["IP6addr", "IP4addr"] properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string IP6addr: type: string description: "The ipv6 address that should be assigned to this BMC, or \"DHCPv6\". If omitted, \"DHCPv6\" is assumed." 
@@ -1221,6 +1341,12 @@ components: type: object required: ["Networks", "Peers"] properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string Networks: type: array items: @@ -1237,6 +1363,12 @@ components: type: object required: ["PowerConnector"] properties: + "@cani.id": + type: string + "@cani.lastModified": + type: string + "@cani.slsSchemaVersion": + type: string PowerConnector: type: array items: @@ -1336,6 +1468,7 @@ components: - $ref: '#/components/schemas/hardware_extra_properties_rtr_bmc' - $ref: '#/components/schemas/hardware_extra_properties_rtr_bmc_nic' - $ref: '#/components/schemas/hardware_extra_properties_rtrmod' + - $ref: '#/components/schemas/hardware_extra_properties_system' slsState: type: object diff --git a/pkg/sls-client/templates/model.mustache b/pkg/sls-client/templates/model.mustache new file mode 100644 index 00000000..a6a6075f --- /dev/null +++ b/pkg/sls-client/templates/model.mustache @@ -0,0 +1,43 @@ +{{>partial_header}} +package {{packageName}} +{{#models}} +{{#imports}} +{{#@first}} +import ( +{{/@first}} + "{{import}}" +{{#@last}} +) +{{/@last}} +{{/imports}} +{{#model}}{{#isEnum}}{{#description}}// {{{classname}}} : {{{description}}}{{/description}} +type {{{classname}}} {{^format}}{{dataType}}{{/format}}{{#format}}{{{format}}}{{/format}} + +// List of {{{name}}} +const ( + {{#allowableValues}} + {{#enumVars}} + {{^@first}} + {{/@first}} + {{name}}_{{{classname}}} {{{classname}}} = "{{{value}}}" + {{/enumVars}} + {{/allowableValues}} +){{/isEnum}}{{^isEnum}}{{#description}} +// {{{description}}}{{/description}} +type {{classname}} struct { +{{#isComposedModel}} + {{#interfaceModels}} + {{classname}} + {{/interfaceModels}} +{{/isComposedModel}} +{{^isComposedModel}} +{{#vars}} +{{^@first}} +{{/@first}} +{{#description}} + // {{{description}}} +{{/description}} + {{name}} {{^isEnum}}{{^isPrimitiveType}}{{^isContainer}}{{^isDateTime}}*{{/isDateTime}}{{/isContainer}}{{/isPrimitiveType}}{{/isEnum}}{{{datatype}}} `json:"{{baseName}}{{^required}},omitempty{{/required}}" mapstructure:"{{baseName}}"{{#withXml}} xml:"{{baseName}}"{{/withXml}}` +{{/vars}} +{{/isComposedModel}} +}{{/isEnum}}{{/model}}{{/models}} \ No newline at end of file diff --git a/vendor/github.com/Cray-HPE/hms-xname/xnames/util.go b/vendor/github.com/Cray-HPE/hms-xname/xnames/util.go index a9089dfa..0801d793 100644 --- a/vendor/github.com/Cray-HPE/hms-xname/xnames/util.go +++ b/vendor/github.com/Cray-HPE/hms-xname/xnames/util.go @@ -1,6 +1,6 @@ // MIT License // -// (C) Copyright [2021-2022] Hewlett Packard Enterprise Development LP +// (C) Copyright [2021-2023] Hewlett Packard Enterprise Development LP // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), @@ -518,3 +518,14 @@ func FromString(xname string) Xname { } return component } + +func FromStringToStruct[T Xname](xname string) *T { + resultRaw := FromString(xname) + + result, ok := resultRaw.(T) + if !ok { + return nil + } + + return &result +} diff --git a/vendor/github.com/Cray-HPE/hms-xname/xnames/xnametypes.json b/vendor/github.com/Cray-HPE/hms-xname/xnames/xnametypes.json new file mode 100644 index 00000000..9a988757 --- /dev/null +++ b/vendor/github.com/Cray-HPE/hms-xname/xnames/xnametypes.json @@ -0,0 +1,1230 @@ +{ + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "CDUMgmtSwitch", + 
"ParentType": "CDU", + "ExampleString": "dDwW", + "Regex": {}, + "GenStr": "d%dw%d", + "NumArgs": 2 + }, + "Fields": [ + "CDU", + "CDUMgmtSwitch" + ], + "FieldPlaceHolders": [ + "dD", + "wW" + ], + "FunctionParameter": "cDUMgmtSwitch" + } + ], + "Entry": { + "Type": "CDU", + "ParentType": "System", + "ExampleString": "dD", + "Regex": {}, + "GenStr": "d%d", + "NumArgs": 1 + }, + "Fields": [ + "CDU" + ], + "FieldPlaceHolders": [ + "dD" + ], + "FunctionParameter": "cDU" + }, + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "CEC", + "ParentType": "Cabinet", + "ExampleString": "xXeE", + "Regex": {}, + "GenStr": "x%de%d", + "NumArgs": 2 + }, + "Fields": [ + "Cabinet", + "CEC" + ], + "FieldPlaceHolders": [ + "xX", + "eE" + ], + "FunctionParameter": "cEC" + }, + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "CabinetBMC", + "ParentType": "Cabinet", + "ExampleString": "xXbB", + "Regex": {}, + "GenStr": "x%db%d", + "NumArgs": 2 + }, + "Fields": [ + "Cabinet", + "CabinetBMC" + ], + "FieldPlaceHolders": [ + "xX", + "bB" + ], + "FunctionParameter": "cabinetBMC" + }, + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "CabinetCDU", + "ParentType": "Cabinet", + "ExampleString": "xXdD", + "Regex": {}, + "GenStr": "x%dd%d", + "NumArgs": 2 + }, + "Fields": [ + "Cabinet", + "CabinetCDU" + ], + "FieldPlaceHolders": [ + "xX", + "dD" + ], + "FunctionParameter": "cabinetCDU" + }, + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "CabinetPDUOutlet", + "ParentType": "CabinetPDU", + "ExampleString": "xXmMpPjJ", + "Regex": {}, + "GenStr": "x%dm%dp%dj%d", + "NumArgs": 4 + }, + "Fields": [ + "Cabinet", + "CabinetPDUController", + "CabinetPDU", + "CabinetPDUOutlet" + ], + "FieldPlaceHolders": [ + "xX", + "mM", + "pP", + "jJ" + ], + "FunctionParameter": "cabinetPDUOutlet" + }, + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "CabinetPDUPowerConnector", + "ParentType": "CabinetPDU", + "ExampleString": "xXmMpPvV", + "Regex": {}, + "GenStr": "x%dm%dp%dv%d", + "NumArgs": 4 + }, + "Fields": [ + "Cabinet", + "CabinetPDUController", + "CabinetPDU", + "CabinetPDUPowerConnector" + ], + "FieldPlaceHolders": [ + "xX", + "mM", + "pP", + "vV" + ], + "FunctionParameter": "cabinetPDUPowerConnector" + } + ], + "Entry": { + "Type": "CabinetPDU", + "ParentType": "CabinetPDUController", + "ExampleString": "xXmMpP", + "Regex": {}, + "GenStr": "x%dm%dp%d", + "NumArgs": 3 + }, + "Fields": [ + "Cabinet", + "CabinetPDUController", + "CabinetPDU" + ], + "FieldPlaceHolders": [ + "xX", + "mM", + "pP" + ], + "FunctionParameter": "cabinetPDU" + }, + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "CabinetPDUNic", + "ParentType": "CabinetPDUController", + "ExampleString": "xXmMpPiI", + "Regex": {}, + "GenStr": "x%dm%di%d", + "NumArgs": 3 + }, + "Fields": [ + "Cabinet", + "CabinetPDUController", + "CabinetPDUNic" + ], + "FieldPlaceHolders": [ + "xX", + "mM", + "iI" + ], + "FunctionParameter": "cabinetPDUNic" + } + ], + "Entry": { + "Type": "CabinetPDUController", + "ParentType": "Cabinet", + "ExampleString": "xXmM", + "Regex": {}, + "GenStr": "x%dm%d", + "NumArgs": 2 + }, + "Fields": [ + "Cabinet", + "CabinetPDUController" + ], + "FieldPlaceHolders": [ + "xX", + "mM" + ], + "FunctionParameter": "cabinetPDUController" + }, + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "CMMFpga", + "ParentType": 
"Chassis", + "ExampleString": "xXcCfF", + "Regex": {}, + "GenStr": "x%dc%df%d", + "NumArgs": 3 + }, + "Fields": [ + "Cabinet", + "Chassis", + "CMMFpga" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "fF" + ], + "FunctionParameter": "cMMFpga" + }, + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "CMMRectifier", + "ParentType": "Chassis", + "ExampleString": "xXcCtT", + "Regex": {}, + "GenStr": "x%dc%dt%d", + "NumArgs": 3 + }, + "Fields": [ + "Cabinet", + "Chassis", + "CMMRectifier" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "tT" + ], + "FunctionParameter": "cMMRectifier" + }, + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "ChassisBMCNic", + "ParentType": "ChassisBMC", + "ExampleString": "xXcCbBiI", + "Regex": {}, + "GenStr": "x%dc%db%di%d", + "NumArgs": 4 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ChassisBMC", + "ChassisBMCNic" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "bB", + "iI" + ], + "FunctionParameter": "chassisBMCNic" + } + ], + "Entry": { + "Type": "ChassisBMC", + "ParentType": "Chassis", + "ExampleString": "xXcCbB", + "Regex": {}, + "GenStr": "x%dc%db%d", + "NumArgs": 3 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ChassisBMC" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "bB" + ], + "FunctionParameter": "chassisBMC" + }, + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "Memory", + "ParentType": "Node", + "ExampleString": "xXcCsSbBnNdD", + "Regex": {}, + "GenStr": "x%dc%ds%db%dn%dd%d", + "NumArgs": 6 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ComputeModule", + "NodeBMC", + "Node", + "Memory" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "sS", + "bB", + "nN", + "dD" + ], + "FunctionParameter": "memory" + }, + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "NodeAccel", + "ParentType": "Node", + "ExampleString": "xXcCsSbBnNaA", + "Regex": {}, + "GenStr": "x%dc%ds%db%dn%da%d", + "NumArgs": 6 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ComputeModule", + "NodeBMC", + "Node", + "NodeAccel" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "sS", + "bB", + "nN", + "aA" + ], + "FunctionParameter": "nodeAccel" + }, + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "NodeAccelRiser", + "ParentType": "Node", + "ExampleString": "xXcCsSbBnNrR", + "Regex": {}, + "GenStr": "x%dc%ds%db%dn%dr%d", + "NumArgs": 6 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ComputeModule", + "NodeBMC", + "Node", + "NodeAccelRiser" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "sS", + "bB", + "nN", + "rR" + ], + "FunctionParameter": "nodeAccelRiser" + }, + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "NodeHsnNic", + "ParentType": "Node", + "ExampleString": "xXcCsSbBnNhH", + "Regex": {}, + "GenStr": "x%dc%ds%db%dn%dh%d", + "NumArgs": 6 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ComputeModule", + "NodeBMC", + "Node", + "NodeHsnNic" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "sS", + "bB", + "nN", + "hH" + ], + "FunctionParameter": "nodeHsnNic" + }, + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "NodeNic", + "ParentType": "Node", + "ExampleString": "xXcCsSbBnNiI", + "Regex": {}, + "GenStr": "x%dc%ds%db%dn%di%d", + "NumArgs": 6 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ComputeModule", + "NodeBMC", + "Node", + "NodeNic" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "sS", + "bB", + "nN", + "iI" + ], + 
"FunctionParameter": "nodeNic" + }, + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "Processor", + "ParentType": "Node", + "ExampleString": "xXcCsSbBnNpP", + "Regex": {}, + "GenStr": "x%dc%ds%db%dn%dp%d", + "NumArgs": 6 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ComputeModule", + "NodeBMC", + "Node", + "Processor" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "sS", + "bB", + "nN", + "pP" + ], + "FunctionParameter": "processor" + }, + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "Drive", + "ParentType": "StorageGroup", + "ExampleString": "xXcCsSbBnNgGkK", + "Regex": {}, + "GenStr": "x%dc%ds%db%dn%dg%dk%d", + "NumArgs": 7 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ComputeModule", + "NodeBMC", + "Node", + "StorageGroup", + "Drive" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "sS", + "bB", + "nN", + "gG", + "kK" + ], + "FunctionParameter": "drive" + } + ], + "Entry": { + "Type": "StorageGroup", + "ParentType": "Node", + "ExampleString": "xXcCsSbBnNgG", + "Regex": {}, + "GenStr": "x%dc%ds%db%dn%dg%d", + "NumArgs": 6 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ComputeModule", + "NodeBMC", + "Node", + "StorageGroup" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "sS", + "bB", + "nN", + "gG" + ], + "FunctionParameter": "storageGroup" + } + ], + "Entry": { + "Type": "Node", + "ParentType": "NodeBMC", + "ExampleString": "xXcCsSbBnN", + "Regex": {}, + "GenStr": "x%dc%ds%db%dn%d", + "NumArgs": 5 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ComputeModule", + "NodeBMC", + "Node" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "sS", + "bB", + "nN" + ], + "FunctionParameter": "node" + }, + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "NodeBMCNic", + "ParentType": "NodeBMC", + "ExampleString": "xXcCsSbBiI", + "Regex": {}, + "GenStr": "x%dc%ds%db%di%d", + "NumArgs": 5 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ComputeModule", + "NodeBMC", + "NodeBMCNic" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "sS", + "bB", + "iI" + ], + "FunctionParameter": "nodeBMCNic" + } + ], + "Entry": { + "Type": "NodeBMC", + "ParentType": "ComputeModule", + "ExampleString": "xXcCsSbB", + "Regex": {}, + "GenStr": "x%dc%ds%db%d", + "NumArgs": 4 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ComputeModule", + "NodeBMC" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "sS", + "bB" + ], + "FunctionParameter": "nodeBMC" + }, + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "NodeEnclosurePowerSupply", + "ParentType": "NodeEnclosure", + "ExampleString": "xXcCsSbBeEtT", + "Regex": {}, + "GenStr": "x%dc%ds%de%dt%d", + "NumArgs": 5 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ComputeModule", + "NodeEnclosure", + "NodeEnclosurePowerSupply" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "sS", + "eE", + "tT" + ], + "FunctionParameter": "nodeEnclosurePowerSupply" + }, + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "NodeFpga", + "ParentType": "NodeEnclosure", + "ExampleString": "xXcCsSbBfF", + "Regex": {}, + "GenStr": "x%dc%ds%db%df%d", + "NumArgs": 5 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ComputeModule", + "NodeEnclosure", + "NodeFpga" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "sS", + "eE", + "fF" + ], + "FunctionParameter": "nodeFpga" + } + ], + "Entry": { + "Type": "NodeEnclosure", + "ParentType": "ComputeModule", + "ExampleString": "xXcCsSbBeE", + "Regex": {}, + "GenStr": "x%dc%ds%de%d", + "NumArgs": 4 + }, + "Fields": [ + 
"Cabinet", + "Chassis", + "ComputeModule", + "NodeEnclosure" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "sS", + "eE" + ], + "FunctionParameter": "nodeEnclosure" + }, + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "NodePowerConnector", + "ParentType": "ComputeModule", + "ExampleString": "xXcCsSv", + "Regex": {}, + "GenStr": "x%dc%ds%dv%d", + "NumArgs": 4 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ComputeModule", + "NodePowerConnector" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "sS", + "Sv" + ], + "FunctionParameter": "nodePowerConnector" + } + ], + "Entry": { + "Type": "ComputeModule", + "ParentType": "Chassis", + "ExampleString": "xXcCsS", + "Regex": {}, + "GenStr": "x%dc%ds%d", + "NumArgs": 3 + }, + "Fields": [ + "Cabinet", + "Chassis", + "ComputeModule" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "sS" + ], + "FunctionParameter": "computeModule" + }, + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "MgmtHLSwitch", + "ParentType": "MgmtHLSwitchEnclosure", + "ExampleString": "xXcChHsS", + "Regex": {}, + "GenStr": "x%dc%dh%ds%d", + "NumArgs": 4 + }, + "Fields": [ + "Cabinet", + "Chassis", + "MgmtHLSwitchEnclosure", + "MgmtHLSwitch" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "hH", + "sS" + ], + "FunctionParameter": "mgmtHLSwitch" + } + ], + "Entry": { + "Type": "MgmtHLSwitchEnclosure", + "ParentType": "Chassis", + "ExampleString": "xXcChH", + "Regex": {}, + "GenStr": "x%dc%dh%d", + "NumArgs": 3 + }, + "Fields": [ + "Cabinet", + "Chassis", + "MgmtHLSwitchEnclosure" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "hH" + ], + "FunctionParameter": "mgmtHLSwitchEnclosure" + }, + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "MgmtSwitchConnector", + "ParentType": "MgmtSwitch", + "ExampleString": "xXcCwWjJ", + "Regex": {}, + "GenStr": "x%dc%dw%dj%d", + "NumArgs": 4 + }, + "Fields": [ + "Cabinet", + "Chassis", + "MgmtSwitch", + "MgmtSwitchConnector" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "wW", + "jJ" + ], + "FunctionParameter": "mgmtSwitchConnector" + } + ], + "Entry": { + "Type": "MgmtSwitch", + "ParentType": "Chassis", + "ExampleString": "xXcCwW", + "Regex": {}, + "GenStr": "x%dc%dw%d", + "NumArgs": 3 + }, + "Fields": [ + "Cabinet", + "Chassis", + "MgmtSwitch" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "wW" + ], + "FunctionParameter": "mgmtSwitch" + }, + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "HSNLink", + "ParentType": "HSNAsic", + "ExampleString": "xXcCrRaAlL", + "Regex": {}, + "GenStr": "x%dc%dr%da%dl%d", + "NumArgs": 5 + }, + "Fields": [ + "Cabinet", + "Chassis", + "RouterModule", + "HSNAsic", + "HSNLink" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "rR", + "aA", + "lL" + ], + "FunctionParameter": "hSNLink" + } + ], + "Entry": { + "Type": "HSNAsic", + "ParentType": "RouterModule", + "ExampleString": "xXcCrRaA", + "Regex": {}, + "GenStr": "x%dc%dr%da%d", + "NumArgs": 4 + }, + "Fields": [ + "Cabinet", + "Chassis", + "RouterModule", + "HSNAsic" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "rR", + "aA" + ], + "FunctionParameter": "hSNAsic" + }, + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "HSNBoard", + "ParentType": "RouterModule", + "ExampleString": "xXcCrReE", + "Regex": {}, + "GenStr": "x%dc%dr%de%d", + "NumArgs": 4 + }, + "Fields": [ + "Cabinet", + "Chassis", + "RouterModule", + "HSNBoard" + ], + 
"FieldPlaceHolders": [ + "xX", + "cC", + "rR", + "eE" + ], + "FunctionParameter": "hSNBoard" + }, + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "HSNConnectorPort", + "ParentType": "HSNConnector", + "ExampleString": "xXcCrRjJpP", + "Regex": {}, + "GenStr": "x%dc%dr%dj%dp%d", + "NumArgs": 5 + }, + "Fields": [ + "Cabinet", + "Chassis", + "RouterModule", + "HSNConnector", + "HSNConnectorPort" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "rR", + "jJ", + "pP" + ], + "FunctionParameter": "hSNConnectorPort" + } + ], + "Entry": { + "Type": "HSNConnector", + "ParentType": "RouterModule", + "ExampleString": "xXcCrRjJ", + "Regex": {}, + "GenStr": "x%dc%dr%dj%d", + "NumArgs": 4 + }, + "Fields": [ + "Cabinet", + "Chassis", + "RouterModule", + "HSNConnector" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "rR", + "jJ" + ], + "FunctionParameter": "hSNConnector" + }, + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "RouterBMCNic", + "ParentType": "RouterBMC", + "ExampleString": "xXcCrRbBiI", + "Regex": {}, + "GenStr": "x%dc%dr%db%di%d", + "NumArgs": 5 + }, + "Fields": [ + "Cabinet", + "Chassis", + "RouterModule", + "RouterBMC", + "RouterBMCNic" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "rR", + "bB", + "iI" + ], + "FunctionParameter": "routerBMCNic" + } + ], + "Entry": { + "Type": "RouterBMC", + "ParentType": "RouterModule", + "ExampleString": "xXcCrRbB", + "Regex": {}, + "GenStr": "x%dc%dr%db%d", + "NumArgs": 4 + }, + "Fields": [ + "Cabinet", + "Chassis", + "RouterModule", + "RouterBMC" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "rR", + "bB" + ], + "FunctionParameter": "routerBMC" + }, + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "RouterFpga", + "ParentType": "RouterModule", + "ExampleString": "xXcCrRfF", + "Regex": {}, + "GenStr": "x%dc%dr%df%d", + "NumArgs": 4 + }, + "Fields": [ + "Cabinet", + "Chassis", + "RouterModule", + "RouterFpga" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "rR", + "fF" + ], + "FunctionParameter": "routerFpga" + }, + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "RouterPowerConnector", + "ParentType": "RouterModule", + "ExampleString": "xXcCrRvV", + "Regex": {}, + "GenStr": "x%dc%dr%dv%d", + "NumArgs": 4 + }, + "Fields": [ + "Cabinet", + "Chassis", + "RouterModule", + "RouterPowerConnector" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "rR", + "vV" + ], + "FunctionParameter": "routerPowerConnector" + }, + { + "Parent": null, + "Children": [ + { + "Parent": null, + "Children": null, + "Entry": { + "Type": "RouterTORFpga", + "ParentType": "RouterTOR", + "ExampleString": "xXcCrRtTfF", + "Regex": {}, + "GenStr": "x%dc%dr%dt%df%d", + "NumArgs": 5 + }, + "Fields": [ + "Cabinet", + "Chassis", + "RouterModule", + "RouterTOR", + "RouterTORFpga" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "rR", + "tT", + "fF" + ], + "FunctionParameter": "routerTORFpga" + } + ], + "Entry": { + "Type": "RouterTOR", + "ParentType": "RouterModule", + "ExampleString": "xXcCrRtT", + "Regex": {}, + "GenStr": "x%dc%dr%dt%d", + "NumArgs": 4 + }, + "Fields": [ + "Cabinet", + "Chassis", + "RouterModule", + "RouterTOR" + ], + "FieldPlaceHolders": [ + "xX", + "cC", + "rR", + "tT" + ], + "FunctionParameter": "routerTOR" + } + ], + "Entry": { + "Type": "RouterModule", + "ParentType": "Chassis", + "ExampleString": "xXcCrR", + "Regex": {}, + "GenStr": "x%dc%dr%d", + "NumArgs": 3 + }, + "Fields": [ + "Cabinet", + "Chassis", + "RouterModule" + ], + 
"FieldPlaceHolders": [ + "xX", + "cC", + "rR" + ], + "FunctionParameter": "routerModule" + } + ], + "Entry": { + "Type": "Chassis", + "ParentType": "Cabinet", + "ExampleString": "xXcC", + "Regex": {}, + "GenStr": "x%dc%d", + "NumArgs": 2 + }, + "Fields": [ + "Cabinet", + "Chassis" + ], + "FieldPlaceHolders": [ + "xX", + "cC" + ], + "FunctionParameter": "chassis" + } + ], + "Entry": { + "Type": "Cabinet", + "ParentType": "System", + "ExampleString": "xX", + "Regex": {}, + "GenStr": "x%d", + "NumArgs": 1 + }, + "Fields": [ + "Cabinet" + ], + "FieldPlaceHolders": [ + "xX" + ], + "FunctionParameter": "cabinet" + } + ], + "Entry": { + "Type": "System", + "ParentType": "INVALID", + "ExampleString": "sS", + "Regex": {}, + "GenStr": "s0", + "NumArgs": 0 + }, + "Fields": null, + "FieldPlaceHolders": null, + "FunctionParameter": "system" +} \ No newline at end of file diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 00000000..bc52e96f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 00000000..79299478 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,145 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. 
+// +build !js,!appengine,!safe,!disableunsafe,go1.4 + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +type flag uintptr + +var ( + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag +) + +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) + +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v + } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} + +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. 
+ for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } + } + panic("reflect.Value read-only flag has changed semantics") +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 00000000..205c28d6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe !go1.4 + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 00000000..1be8ce94 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. 
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface. However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules. We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
+	switch iface := v.Interface().(type) {
+	case error:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.Error()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+
+		w.Write([]byte(iface.Error()))
+		return true
+
+	case fmt.Stringer:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.String()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+		w.Write([]byte(iface.String()))
+		return true
+	}
+	return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. +func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. 
+ switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. +func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 00000000..2e3d22f3 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. + DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. 
+ ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. 
It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) 
+} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 00000000..aacaac6f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. 
+ +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types is as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc. with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels.
This is useful when + diffing data structures in tests. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported, with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. + + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list of convenience functions.
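As a quick editorial illustration of the two styles documented above, the following is a runnable sketch; the types and variable names are invented for the example, and only functions shown in this diff (Dump, Fdump, Printf) are used.

    package main

    import (
        "os"

        "github.com/davecgh/go-spew/spew"
    )

    type bar struct{ Flag int }

    type foo struct {
        bar *bar
        M   map[string]bool
    }

    func main() {
        f := foo{bar: &bar{Flag: 2}, M: map[string]bool{"one": true}}
        spew.Dump(f)                    // multi-line output with types and addresses
        spew.Fdump(os.Stderr, f)        // same output, to an arbitrary io.Writer
        spew.Printf("compact: %v\n", f) // inline, most compact
        spew.Printf("typed: %#+v\n", f) // inline, with types and pointer addresses
    }

Note that the unexported bar field is still followed and dumped, which is one of the features the package documentation calls out.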
+ +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 00000000..f78d89fc --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. 
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound: + d.w.Write(nilAngleBytes) + + case cycleFound: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. 
+ iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. 
It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 00000000..b04edb7d --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
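Before the format-reconstruction helpers implemented below, a small sketch of what this deferral means in practice: verbs other than the v-family, including their flags, width, and precision, are rebuilt and passed straight through to fmt. The literal values here are arbitrary.

    package main

    import "github.com/davecgh/go-spew/spew"

    func main() {
        // Only %v/%+v/%#v/%#+v are handled by spew's formatter itself;
        // everything else is reconstructed and deferred to fmt.
        spew.Printf("%08x\n", 0xBEEF) // fmt handles it: prints "0000beef"
        spew.Printf("%q\n", "hi")     // fmt handles it: prints "hi" with quotes
    }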
+func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound: + f.fs.Write(nilAngleBytes) + + case cycleFound: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. + if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf.
*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go new file mode 100644 index 00000000..32c0e338 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE new file mode 100644 index 00000000..c67dad61 --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go new file mode 100644 index 00000000..003e99fa --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go @@ -0,0 +1,772 @@ +// Package difflib is a partial port of Python difflib module. +// +// It provides tools to compare sequences of strings and generate textual diffs. +// +// The following class and functions have been ported: +// +// - SequenceMatcher +// +// - unified_diff +// +// - context_diff +// +// Getting unified diffs was the main goal of the port. Keep in mind this code +// is mostly suitable to output text differences in a human friendly way, there +// are no guarantees generated diffs are consumable by patch(1). +package difflib + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. 
This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool) *SequenceMatcher { + + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s, _ := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s, _ := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s, _ := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// and for all (i',j',k') meeting those conditions, +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
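As an aside before the implementation below, a hypothetical call pattern for GetMatchingBlocks as documented above; the input lines are toy data.

    package main

    import (
        "fmt"

        "github.com/pmezard/go-difflib/difflib"
    )

    func main() {
        m := difflib.NewMatcher(
            []string{"one", "two", "three"},
            []string{"one", "2", "three"},
        )
        // Triples (i, j, n) with a[i:i+n] == b[j:j+n]; the final triple is
        // the (len(a), len(b), 0) sentinel described above.
        for _, blk := range m.GetMatchingBlocks() {
            fmt.Printf("a[%d:%d] == b[%d:%d]\n",
                blk.A, blk.A+blk.Size, blk.B, blk.B+blk.Size)
        }
    }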
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
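The grouped form documented above trims long unchanged runs down to n lines of context, which is what the unified-diff writer later in this file relies on. A small sketch, with toy inputs, of both the flat and grouped opcode views:

    package main

    import (
        "fmt"

        "github.com/pmezard/go-difflib/difflib"
    )

    func main() {
        m := difflib.NewMatcher(
            []string{"a", "b", "c", "d"},
            []string{"a", "x", "c", "d"},
        )
        // Tags: 'r' replace, 'd' delete, 'i' insert, 'e' equal.
        for _, op := range m.GetOpCodes() {
            fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
        }
        // Grouped variant keeps at most one line of context around changes.
        groups := m.GetGroupedOpCodes(1)
        fmt.Println("change clusters:", len(groups))
    }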
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n)}) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRatio() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s] = m.fullBCount[s] + 1 + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches += 1 + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio().
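The cheap-to-expensive cascade these comments suggest might look like the sketch below (the cutoff value is arbitrary): each successive ratio is a tighter but costlier bound, so the cheap upper bounds can rule out dissimilar pairs before the full Ratio is ever computed.

    package main

    import (
        "fmt"

        "github.com/pmezard/go-difflib/difflib"
    )

    func main() {
        m := difflib.NewMatcher(
            []string{"one", "two", "three"},
            []string{"one", "2", "three"},
        )
        const cutoff = 0.6
        // Only fall through to the expensive Ratio if both upper bounds pass.
        if m.RealQuickRatio() >= cutoff && m.QuickRatio() >= cutoff {
            fmt.Printf("similarity: %.2f\n", m.Ratio())
        }
    }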
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+	buf := bufio.NewWriter(writer)
+	defer buf.Flush()
+	wf := func(format string, args ...interface{}) error {
+		_, err := buf.WriteString(fmt.Sprintf(format, args...))
+		return err
+	}
+	ws := func(s string) error {
+		_, err := buf.WriteString(s)
+		return err
+	}
+
+	if len(diff.Eol) == 0 {
+		diff.Eol = "\n"
+	}
+
+	started := false
+	m := NewMatcher(diff.A, diff.B)
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
+		if !started {
+			started = true
+			fromDate := ""
+			if len(diff.FromDate) > 0 {
+				fromDate = "\t" + diff.FromDate
+			}
+			toDate := ""
+			if len(diff.ToDate) > 0 {
+				toDate = "\t" + diff.ToDate
+			}
+			if diff.FromFile != "" || diff.ToFile != "" {
+				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		first, last := g[0], g[len(g)-1]
+		range1 := formatRangeUnified(first.I1, last.I2)
+		range2 := formatRangeUnified(first.J1, last.J2)
+		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+			return err
+		}
+		for _, c := range g {
+			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+			if c.Tag == 'e' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws(" " + line); err != nil {
+						return err
+					}
+				}
+				continue
+			}
+			if c.Tag == 'r' || c.Tag == 'd' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws("-" + line); err != nil {
+						return err
+					}
+				}
+			}
+			if c.Tag == 'r' || c.Tag == 'i' {
+				for _, line := range diff.B[j1:j2] {
+					if err := ws("+" + line); err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+	w := &bytes.Buffer{}
+	err := WriteUnifiedDiff(w, diff)
+	return string(w.Bytes()), err
+}
+
+// Convert range to the "ed" format.
+func formatRangeContext(start, stop int) string {
+	// Per the diff spec at http://www.unix.org/single_unix_specification/
+	beginning := start + 1 // lines start numbering with one
+	length := stop - start
+	if length == 0 {
+		beginning -= 1 // empty ranges begin at line just before the range
+	}
+	if length <= 1 {
+		return fmt.Sprintf("%d", beginning)
+	}
+	return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
+}
+
+type ContextDiff UnifiedDiff
+
+// Compare two sequences of lines; generate the delta as a context diff.
+//
+// Context diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by diff.Context
+// which defaults to three.
+//
+// By default, the diff control lines (those with *** or ---) are
+// created with a trailing newline.
+//
+// For inputs that do not have trailing newlines, set the diff.Eol
+// argument to "" so that the output will be uniformly newline free.
+//
+// The context diff format normally has a header for filenames and
+// modification times. Any or all of these may be specified using
+// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
+// The modification times are normally expressed in the ISO 8601 format.
+// If not specified, the strings default to blanks.
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
+	buf := bufio.NewWriter(writer)
+	defer buf.Flush()
+	var diffErr error
+	wf := func(format string, args ...interface{}) {
+		_, err := buf.WriteString(fmt.Sprintf(format, args...))
+		if diffErr == nil && err != nil {
+			diffErr = err
+		}
+	}
+	ws := func(s string) {
+		_, err := buf.WriteString(s)
+		if diffErr == nil && err != nil {
+			diffErr = err
+		}
+	}
+
+	if len(diff.Eol) == 0 {
+		diff.Eol = "\n"
+	}
+
+	prefix := map[byte]string{
+		'i': "+ ",
+		'd': "- ",
+		'r': "! ",
+		'e': "  ",
+	}
+
+	started := false
+	m := NewMatcher(diff.A, diff.B)
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
+		if !started {
+			started = true
+			fromDate := ""
+			if len(diff.FromDate) > 0 {
+				fromDate = "\t" + diff.FromDate
+			}
+			toDate := ""
+			if len(diff.ToDate) > 0 {
+				toDate = "\t" + diff.ToDate
+			}
+			if diff.FromFile != "" || diff.ToFile != "" {
+				wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+			}
+		}
+
+		first, last := g[0], g[len(g)-1]
+		ws("***************" + diff.Eol)
+
+		range1 := formatRangeContext(first.I1, last.I2)
+		wf("*** %s ****%s", range1, diff.Eol)
+		for _, c := range g {
+			if c.Tag == 'r' || c.Tag == 'd' {
+				for _, cc := range g {
+					if cc.Tag == 'i' {
+						continue
+					}
+					for _, line := range diff.A[cc.I1:cc.I2] {
+						ws(prefix[cc.Tag] + line)
+					}
+				}
+				break
+			}
+		}
+
+		range2 := formatRangeContext(first.J1, last.J2)
+		wf("--- %s ----%s", range2, diff.Eol)
+		for _, c := range g {
+			if c.Tag == 'r' || c.Tag == 'i' {
+				for _, cc := range g {
+					if cc.Tag == 'd' {
+						continue
+					}
+					for _, line := range diff.B[cc.J1:cc.J2] {
+						ws(prefix[cc.Tag] + line)
+					}
+				}
+				break
+			}
+		}
+	}
+	return diffErr
+}
+
+// Like WriteContextDiff but returns the diff as a string.
+func GetContextDiffString(diff ContextDiff) (string, error) {
+	w := &bytes.Buffer{}
+	err := WriteContextDiff(w, diff)
+	return string(w.Bytes()), err
+}
+
+// Split a string on "\n" while preserving them. The output can be used
+// as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+	lines := strings.SplitAfter(s, "\n")
+	lines[len(lines)-1] += "\n"
+	return lines
+}
diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE
new file mode 100644
index 00000000..f38ec595
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
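For reviewers, here is a minimal sketch of how the difflib API vendored above can be exercised. It is illustrative only and not part of the change set: the import path github.com/pmezard/go-difflib/difflib is assumed from the vendor directory layout, and the inputs and file names are placeholders.

package main

import (
	"fmt"

	// Assumed import path for the vendored difflib package.
	"github.com/pmezard/go-difflib/difflib"
)

func main() {
	// SplitLines keeps the trailing "\n" on each element, which is the
	// form the UnifiedDiff and ContextDiff structures expect.
	a := difflib.SplitLines("one\ntwo\nthree\n")
	b := difflib.SplitLines("one\ntwo\nfour\n")

	// Ratio is 2.0*M/T, where M is the number of matched elements and
	// T is the total number of elements in both sequences.
	m := difflib.NewMatcher(a, b)
	fmt.Printf("ratio: %.2f\n", m.Ratio())

	// Render a unified diff with three lines of context; Eol defaults
	// to "\n" when left empty.
	text, err := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
		A:        a,
		FromFile: "a.txt", // placeholder header names
		B:        b,
		ToFile:   "b.txt",
		Context:  3,
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(text)
}

The context format works the same way through GetContextDiffString, since ContextDiff is a defined type over UnifiedDiff and shares its fields.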
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go new file mode 100644 index 00000000..bf89ecd2 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -0,0 +1,622 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package assert + +import ( + http "net/http" + url "net/url" + time "time" +) + +// Conditionf uses a Comparison to assert a complex condition. +func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Condition(t, comp, append([]interface{}{msg}, args...)...) +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Contains(t, s, contains, append([]interface{}{msg}, args...)...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails +// if the path is a file rather a directory or there is an error checking whether it exists. +func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return DirExists(t, path, append([]interface{}{msg}, args...)...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Emptyf(t, obj, "error message %s", "formatted") +func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Empty(t, object, append([]interface{}{msg}, args...)...) +} + +// Equalf asserts that two objects are equal. +// +// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Equal(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. 
+// +// actualObj, err := SomeFunction() +// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...) +} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) +func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Errorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Error(t, err, append([]interface{}{msg}, args...)...) +} + +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) +func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Failf reports a failure through +func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Fail(t, failureMessage, append([]interface{}{msg}, args...)...) +} + +// FailNowf fails test +func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...) +} + +// Falsef asserts that the specified value is false. +// +// assert.Falsef(t, myBool, "error message %s", "formatted") +func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return False(t, value, append([]interface{}{msg}, args...)...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if +// the path points to a directory or there is an error when trying to check the file. +func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return FileExists(t, path, append([]interface{}{msg}, args...)...) 
+} + +// Greaterf asserts that the first element is greater than the second +// +// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1)) +// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Greater(t, e1, e2, append([]interface{}{msg}, args...)...) +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...) +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...) 
+} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...) +} + +// Implementsf asserts that an object is implemented by the specified interface. +// +// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) +func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + +// InDeltaf asserts that the two numerals are within delta of each other. +// +// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InDeltaSlicef is the same as InDelta, except it compares two slices. +func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) +} + +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...) +} + +// IsTypef asserts that the specified objects are of the same type. +func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...) +} + +// JSONEqf asserts that two JSON strings are equivalent. 
+// +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. +// +// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Len(t, object, length, append([]interface{}{msg}, args...)...) +} + +// Lessf asserts that the first element is less than the second +// +// assert.Lessf(t, 1, 2, "error message %s", "formatted") +// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2)) +// assert.Lessf(t, "a", "b", "error message %s", "formatted") +func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Less(t, e1, e2, append([]interface{}{msg}, args...)...) +} + +// LessOrEqualf asserts that the first element is less than or equal to the second +// +// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) +} + +// Neverf asserts that the given condition doesn't satisfy in waitFor time, +// periodically checking the target function each tick. +// +// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Never(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) +} + +// Nilf asserts that the specified object is nil. +// +// assert.Nilf(t, err, "error message %s", "formatted") +func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Nil(t, object, append([]interface{}{msg}, args...)...) +} + +// NoDirExistsf checks whether a directory does not exist in the given path. +// It fails if the path points to an existing _directory_ only. +func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NoDirExists(t, path, append([]interface{}{msg}, args...)...) +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoErrorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NoError(t, err, append([]interface{}{msg}, args...)...) +} + +// NoFileExistsf checks whether a file does not exist in a given path. 
It fails +// if the path points to an existing _file_ only. +func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NoFileExists(t, path, append([]interface{}{msg}, args...)...) +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) +} + +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { +// assert.Equal(t, "two", obj[1]) +// } +func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotEmpty(t, object, append([]interface{}{msg}, args...)...) +} + +// NotEqualf asserts that the specified values are NOT equal. +// +// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). +func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// NotNilf asserts that the specified object is not nil. +// +// assert.NotNilf(t, err, "error message %s", "formatted") +func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotNil(t, object, append([]interface{}{msg}, args...)...) +} + +// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotPanics(t, f, append([]interface{}{msg}, args...)...) +} + +// NotRegexpf asserts that a specified regexp does not match a string. +// +// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") +// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) +} + +// NotSamef asserts that two pointers do not reference the same object. +// +// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. 
+func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...) +} + +// NotZerof asserts that i is not the zero value for its type. +func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotZero(t, i, append([]interface{}{msg}, args...)...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Panics(t, f, append([]interface{}{msg}, args...)...) +} + +// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc +// panics, and that the recovered panic value is an error that satisfies the +// EqualError comparison. +// +// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...) +} + +// Regexpf asserts that a specified regexp matches a string. +// +// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") +// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) +} + +// Samef asserts that two pointers reference the same object. +// +// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Same(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Subsetf asserts that the specified list(array, slice...) contains all +// elements given in the specified subset(array, slice...). 
+// +// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Subset(t, list, subset, append([]interface{}{msg}, args...)...) +} + +// Truef asserts that the specified value is true. +// +// assert.Truef(t, myBool, "error message %s", "formatted") +func Truef(t TestingT, value bool, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return True(t, value, append([]interface{}{msg}, args...)...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...) +} + +// Zerof asserts that i is the zero value for its type. +func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Zero(t, i, append([]interface{}{msg}, args...)...) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl new file mode 100644 index 00000000..d2bb0b81 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl @@ -0,0 +1,5 @@ +{{.CommentFormat}} +func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool { + if h, ok := t.(tHelper); ok { h.Helper() } + return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}}) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go new file mode 100644 index 00000000..75ecdcaa --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -0,0 +1,1232 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package assert + +import ( + http "net/http" + url "net/url" + time "time" +) + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Condition(a.t, comp, msgAndArgs...) +} + +// Conditionf uses a Comparison to assert a complex condition. +func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Conditionf(a.t, comp, msg, args...) +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. 
+// +// a.Contains("Hello World", "World") +// a.Contains(["Hello", "World"], "World") +// a.Contains({"Hello": "World"}, "Hello") +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Contains(a.t, s, contains, msgAndArgs...) +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Containsf("Hello World", "World", "error message %s", "formatted") +// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted") +// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted") +func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Containsf(a.t, s, contains, msg, args...) +} + +// DirExists checks whether a directory exists in the given path. It also fails +// if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return DirExists(a.t, path, msgAndArgs...) +} + +// DirExistsf checks whether a directory exists in the given path. It also fails +// if the path is a file rather a directory or there is an error checking whether it exists. +func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return DirExistsf(a.t, path, msg, args...) +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2]) +func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ElementsMatchf(a.t, listA, listB, msg, args...) +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Empty(obj) +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Empty(a.t, object, msgAndArgs...) +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Emptyf(obj, "error message %s", "formatted") +func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Emptyf(a.t, object, msg, args...) +} + +// Equal asserts that two objects are equal. 
+// +// a.Equal(123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Equal(a.t, expected, actual, msgAndArgs...) +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualError(err, expectedErrorString) +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualError(a.t, theError, errString, msgAndArgs...) +} + +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted") +func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualErrorf(a.t, theError, errString, msg, args...) +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValues(uint32(123), int32(123)) +func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualValues(a.t, expected, actual, msgAndArgs...) +} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) +func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return EqualValuesf(a.t, expected, actual, msg, args...) +} + +// Equalf asserts that two objects are equal. +// +// a.Equalf(123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Equalf(a.t, expected, actual, msg, args...) +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Error(err) { +// assert.Equal(t, expectedError, err) +// } +func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Error(a.t, err, msgAndArgs...) +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Errorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Errorf(a.t, err, msg, args...) +} + +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. 
+// +// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) +func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Eventually(a.t, condition, waitFor, tick, msgAndArgs...) +} + +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Eventuallyf(a.t, condition, waitFor, tick, msg, args...) +} + +// Exactly asserts that two objects are equal in value and type. +// +// a.Exactly(int32(123), int64(123)) +func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Exactly(a.t, expected, actual, msgAndArgs...) +} + +// Exactlyf asserts that two objects are equal in value and type. +// +// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) +func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Exactlyf(a.t, expected, actual, msg, args...) +} + +// Fail reports a failure through +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Fail(a.t, failureMessage, msgAndArgs...) +} + +// FailNow fails test +func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return FailNow(a.t, failureMessage, msgAndArgs...) +} + +// FailNowf fails test +func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return FailNowf(a.t, failureMessage, msg, args...) +} + +// Failf reports a failure through +func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Failf(a.t, failureMessage, msg, args...) +} + +// False asserts that the specified value is false. +// +// a.False(myBool) +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return False(a.t, value, msgAndArgs...) +} + +// Falsef asserts that the specified value is false. +// +// a.Falsef(myBool, "error message %s", "formatted") +func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Falsef(a.t, value, msg, args...) +} + +// FileExists checks whether a file exists in the given path. It also fails if +// the path points to a directory or there is an error when trying to check the file. +func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return FileExists(a.t, path, msgAndArgs...) +} + +// FileExistsf checks whether a file exists in the given path. It also fails if +// the path points to a directory or there is an error when trying to check the file. 
+func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return FileExistsf(a.t, path, msg, args...) +} + +// Greater asserts that the first element is greater than the second +// +// a.Greater(2, 1) +// a.Greater(float64(2), float64(1)) +// a.Greater("b", "a") +func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Greater(a.t, e1, e2, msgAndArgs...) +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqual(2, 1) +// a.GreaterOrEqual(2, 2) +// a.GreaterOrEqual("b", "a") +// a.GreaterOrEqual("b", "b") +func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return GreaterOrEqual(a.t, e1, e2, msgAndArgs...) +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") +// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") +// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") +// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return GreaterOrEqualf(a.t, e1, e2, msg, args...) +} + +// Greaterf asserts that the first element is greater than the second +// +// a.Greaterf(2, 1, "error message %s", "formatted") +// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1)) +// a.Greaterf("b", "a", "error message %s", "formatted") +func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Greaterf(a.t, e1, e2, msg, args...) +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...) +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...) +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPError(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPErrorf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+// a.Implements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Implements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// Implementsf asserts that an object implements the specified interface.
+//
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Implementsf(a.t, interfaceObject, object, msg, args...)
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// a.InDelta(math.Pi, 22/7.0, 0.01)
+func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDelta(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// IsType asserts that the specified objects are of the same type.
+func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return IsType(a.t, expectedType, object, msgAndArgs...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return IsTypef(a.t, expectedType, object, msg, args...)
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return JSONEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return JSONEqf(a.t, expected, actual, msg, args...)
+}
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+// a.Len(mySlice, 3)
+func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Len(a.t, object, length, msgAndArgs...)
+}
+
+// Lenf asserts that the specified object has specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// a.Lenf(mySlice, 3, "error message %s", "formatted")
+func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Lenf(a.t, object, length, msg, args...)
+}
+
+// Less asserts that the first element is less than the second
+//
+// a.Less(1, 2)
+// a.Less(float64(1), float64(2))
+// a.Less("a", "b")
+func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Less(a.t, e1, e2, msgAndArgs...)
+}
+
+// LessOrEqual asserts that the first element is less than or equal to the second
+//
+// a.LessOrEqual(1, 2)
+// a.LessOrEqual(2, 2)
+// a.LessOrEqual("a", "b")
+// a.LessOrEqual("b", "b")
+func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return LessOrEqual(a.t, e1, e2, msgAndArgs...)
+}
+
+// LessOrEqualf asserts that the first element is less than or equal to the second
+//
+// a.LessOrEqualf(1, 2, "error message %s", "formatted")
+// a.LessOrEqualf(2, 2, "error message %s", "formatted")
+// a.LessOrEqualf("a", "b", "error message %s", "formatted")
+// a.LessOrEqualf("b", "b", "error message %s", "formatted")
+func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return LessOrEqualf(a.t, e1, e2, msg, args...)
+}
+
+// Lessf asserts that the first element is less than the second
+//
+// a.Lessf(1, 2, "error message %s", "formatted")
+// a.Lessf(float64(1), float64(2), "error message %s", "formatted")
+// a.Lessf("a", "b", "error message %s", "formatted")
+func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Lessf(a.t, e1, e2, msg, args...)
+}
+
+// Never asserts that the given condition is never satisfied within the waitFor
+// duration, periodically checking the target function each tick.
+//
+// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
+func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Never(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// Neverf asserts that the given condition is never satisfied within the waitFor
+// duration, periodically checking the target function each tick.
+//
+// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Neverf(a.t, condition, waitFor, tick, msg, args...)
+}
+
+// Nil asserts that the specified object is nil.
+//
+// a.Nil(err)
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Nil(a.t, object, msgAndArgs...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// a.Nilf(err, "error message %s", "formatted")
+func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Nilf(a.t, object, msg, args...)
+}
+
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NoDirExists(a.t, path, msgAndArgs...)
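As a quick sketch of how the HTTP assertion helpers above are exercised: these methods wrap the package-level assert functions, and the handler below is a hypothetical stand-in used only for illustration.

	package example

	import (
		"net/http"
		"testing"

		"github.com/stretchr/testify/assert"
	)

	func TestHealthHandler(t *testing.T) {
		// healthHandler is a hypothetical handler, not part of this patch.
		healthHandler := func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		}
		assert.HTTPSuccess(t, healthHandler, "GET", "/health", nil)                  // asserts a 2xx status
		assert.HTTPBodyNotContains(t, healthHandler, "GET", "/health", nil, "fail") // inspects the body
	}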
+} + +// NoDirExistsf checks whether a directory does not exist in the given path. +// It fails if the path points to an existing _directory_ only. +func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NoDirExistsf(a.t, path, msg, args...) +} + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NoError(a.t, err, msgAndArgs...) +} + +// NoErrorf asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoErrorf(err, "error message %s", "formatted") { +// assert.Equal(t, expectedObj, actualObj) +// } +func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NoErrorf(a.t, err, msg, args...) +} + +// NoFileExists checks whether a file does not exist in a given path. It fails +// if the path points to an existing _file_ only. +func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NoFileExists(a.t, path, msgAndArgs...) +} + +// NoFileExistsf checks whether a file does not exist in a given path. It fails +// if the path points to an existing _file_ only. +func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NoFileExistsf(a.t, path, msg, args...) +} + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContains("Hello World", "Earth") +// a.NotContains(["Hello", "World"], "Earth") +// a.NotContains({"Hello": "World"}, "Earth") +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotContains(a.t, s, contains, msgAndArgs...) +} + +// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") +// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") +// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") +func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotContainsf(a.t, s, contains, msg, args...) +} + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotEmpty(a.t, object, msgAndArgs...) +} + +// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. 
+//
+// if a.NotEmptyf(obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotEmptyf(a.t, object, msg, args...)
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// a.NotEqual(obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotEqual(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotEqualf(a.t, expected, actual, msg, args...)
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// a.NotNil(err)
+func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotNil(a.t, object, msgAndArgs...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// a.NotNilf(err, "error message %s", "formatted")
+func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotNilf(a.t, object, msg, args...)
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanics(func(){ RemainCalm() })
+func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotPanics(a.t, f, msgAndArgs...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotPanicsf(a.t, f, msg, args...)
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+// a.NotRegexp("^start", "it's not starting")
+func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotRegexp(a.t, rx, str, msgAndArgs...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotRegexpf(a.t, rx, str, msg, args...)
+}
+
+// NotSame asserts that two pointers do not reference the same object.
+//
+// a.NotSame(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotSame(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotSamef(a.t, expected, actual, msg, args...)
+}
+
+// NotSubset asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotSubset(a.t, list, subset, msgAndArgs...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotSubsetf(a.t, list, subset, msg, args...)
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotZero(a.t, i, msgAndArgs...)
+}
+
+// NotZerof asserts that i is not the zero value for its type.
+func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotZerof(a.t, i, msg, args...)
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panics(func(){ GoCrazy() })
+func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Panics(a.t, f, msgAndArgs...)
+}
+
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithError("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return PanicsWithError(a.t, errString, f, msgAndArgs...)
+}
+
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return PanicsWithErrorf(a.t, errString, f, msg, args...)
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValue("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return PanicsWithValue(a.t, expected, f, msgAndArgs...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return PanicsWithValuef(a.t, expected, f, msg, args...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Panicsf(a.t, f, msg, args...)
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// a.Regexp(regexp.MustCompile("start"), "it's starting")
+// a.Regexp("start...$", "it's not starting")
+func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Regexp(a.t, rx, str, msgAndArgs...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Regexpf(a.t, rx, str, msg, args...)
+}
+
+// Same asserts that two pointers reference the same object.
+//
+// a.Same(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Same(a.t, expected, actual, msgAndArgs...)
+}
+
+// Samef asserts that two pointers reference the same object.
+//
+// a.Samef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Samef(a.t, expected, actual, msg, args...)
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Subset(a.t, list, subset, msgAndArgs...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+// +// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Subsetf(a.t, list, subset, msg, args...) +} + +// True asserts that the specified value is true. +// +// a.True(myBool) +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return True(a.t, value, msgAndArgs...) +} + +// Truef asserts that the specified value is true. +// +// a.Truef(myBool, "error message %s", "formatted") +func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Truef(a.t, value, msg, args...) +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinDurationf(a.t, expected, actual, delta, msg, args...) +} + +// YAMLEq asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return YAMLEq(a.t, expected, actual, msgAndArgs...) +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return YAMLEqf(a.t, expected, actual, msg, args...) +} + +// Zero asserts that i is the zero value for its type. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Zero(a.t, i, msgAndArgs...) +} + +// Zerof asserts that i is the zero value for its type. +func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Zerof(a.t, i, msg, args...) 
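The methods above are generated from the template in the next file; binding the TestingT once with assert.New gives the same behavior as the package-level calls. A minimal usage sketch, assuming a plain *testing.T:

	package example

	import (
		"testing"

		"github.com/stretchr/testify/assert"
	)

	func TestForwarderEquivalence(t *testing.T) {
		// Package-level style: pass t to every call.
		assert.True(t, 1 < 2)

		// Forwarder style: bind t once via assert.New, then call methods.
		// Each method marks itself as a helper and delegates to the
		// package-level function, so failure line numbers point here.
		a := assert.New(t)
		a.True(1 < 2)
		a.Len([]int{1, 2, 3}, 3)
		a.Zero("")
	}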
+} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl new file mode 100644 index 00000000..188bb9e1 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl @@ -0,0 +1,5 @@ +{{.CommentWithoutT "a"}} +func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { + if h, ok := a.t.(tHelper); ok { h.Helper() } + return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go new file mode 100644 index 00000000..15a486ca --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -0,0 +1,309 @@ +package assert + +import ( + "fmt" + "reflect" +) + +func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) { + switch kind { + case reflect.Int: + { + intobj1 := obj1.(int) + intobj2 := obj2.(int) + if intobj1 > intobj2 { + return -1, true + } + if intobj1 == intobj2 { + return 0, true + } + if intobj1 < intobj2 { + return 1, true + } + } + case reflect.Int8: + { + int8obj1 := obj1.(int8) + int8obj2 := obj2.(int8) + if int8obj1 > int8obj2 { + return -1, true + } + if int8obj1 == int8obj2 { + return 0, true + } + if int8obj1 < int8obj2 { + return 1, true + } + } + case reflect.Int16: + { + int16obj1 := obj1.(int16) + int16obj2 := obj2.(int16) + if int16obj1 > int16obj2 { + return -1, true + } + if int16obj1 == int16obj2 { + return 0, true + } + if int16obj1 < int16obj2 { + return 1, true + } + } + case reflect.Int32: + { + int32obj1 := obj1.(int32) + int32obj2 := obj2.(int32) + if int32obj1 > int32obj2 { + return -1, true + } + if int32obj1 == int32obj2 { + return 0, true + } + if int32obj1 < int32obj2 { + return 1, true + } + } + case reflect.Int64: + { + int64obj1 := obj1.(int64) + int64obj2 := obj2.(int64) + if int64obj1 > int64obj2 { + return -1, true + } + if int64obj1 == int64obj2 { + return 0, true + } + if int64obj1 < int64obj2 { + return 1, true + } + } + case reflect.Uint: + { + uintobj1 := obj1.(uint) + uintobj2 := obj2.(uint) + if uintobj1 > uintobj2 { + return -1, true + } + if uintobj1 == uintobj2 { + return 0, true + } + if uintobj1 < uintobj2 { + return 1, true + } + } + case reflect.Uint8: + { + uint8obj1 := obj1.(uint8) + uint8obj2 := obj2.(uint8) + if uint8obj1 > uint8obj2 { + return -1, true + } + if uint8obj1 == uint8obj2 { + return 0, true + } + if uint8obj1 < uint8obj2 { + return 1, true + } + } + case reflect.Uint16: + { + uint16obj1 := obj1.(uint16) + uint16obj2 := obj2.(uint16) + if uint16obj1 > uint16obj2 { + return -1, true + } + if uint16obj1 == uint16obj2 { + return 0, true + } + if uint16obj1 < uint16obj2 { + return 1, true + } + } + case reflect.Uint32: + { + uint32obj1 := obj1.(uint32) + uint32obj2 := obj2.(uint32) + if uint32obj1 > uint32obj2 { + return -1, true + } + if uint32obj1 == uint32obj2 { + return 0, true + } + if uint32obj1 < uint32obj2 { + return 1, true + } + } + case reflect.Uint64: + { + uint64obj1 := obj1.(uint64) + uint64obj2 := obj2.(uint64) + if uint64obj1 > uint64obj2 { + return -1, true + } + if uint64obj1 == uint64obj2 { + return 0, true + } + if uint64obj1 < uint64obj2 { + return 1, true + } + } + case reflect.Float32: + { + float32obj1 := obj1.(float32) + float32obj2 := obj2.(float32) + if float32obj1 > float32obj2 { + return -1, true + } + if float32obj1 == float32obj2 { + return 0, true + } + if float32obj1 < float32obj2 { + return 1, true + } + } + case 
reflect.Float64: + { + float64obj1 := obj1.(float64) + float64obj2 := obj2.(float64) + if float64obj1 > float64obj2 { + return -1, true + } + if float64obj1 == float64obj2 { + return 0, true + } + if float64obj1 < float64obj2 { + return 1, true + } + } + case reflect.String: + { + stringobj1 := obj1.(string) + stringobj2 := obj2.(string) + if stringobj1 > stringobj2 { + return -1, true + } + if stringobj1 == stringobj2 { + return 0, true + } + if stringobj1 < stringobj2 { + return 1, true + } + } + } + + return 0, false +} + +// Greater asserts that the first element is greater than the second +// +// assert.Greater(t, 2, 1) +// assert.Greater(t, float64(2), float64(1)) +// assert.Greater(t, "b", "a") +func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != -1 { + return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...) + } + + return true +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqual(t, 2, 1) +// assert.GreaterOrEqual(t, 2, 2) +// assert.GreaterOrEqual(t, "b", "a") +// assert.GreaterOrEqual(t, "b", "b") +func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != -1 && res != 0 { + return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...) + } + + return true +} + +// Less asserts that the first element is less than the second +// +// assert.Less(t, 1, 2) +// assert.Less(t, float64(1), float64(2)) +// assert.Less(t, "a", "b") +func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != 1 { + return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...) + } + + return true +} + +// LessOrEqual asserts that the first element is less than or equal to the second +// +// assert.LessOrEqual(t, 1, 2) +// assert.LessOrEqual(t, 2, 2) +// assert.LessOrEqual(t, "a", "b") +// assert.LessOrEqual(t, "b", "b") +func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) 
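Because compare dispatches on reflect.Kind, both operands of the ordered assertions must share a kind; a short sketch of what passes and what fails under these rules (assuming a plain *testing.T):

	package example

	import (
		"testing"

		"github.com/stretchr/testify/assert"
	)

	func TestOrderedAssertions(t *testing.T) {
		assert.Greater(t, 2, 1)        // same kind (int): passes
		assert.Less(t, "a", "b")       // strings compare lexicographically
		assert.GreaterOrEqual(t, 2, 2) // equality also satisfies GreaterOrEqual

		// Mixed kinds fail with "Elements should be the same type",
		// even when the values look comparable:
		// assert.Greater(t, int64(2), 1) // would fail: int64 vs int
	}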
+	}
+
+	res, isComparable := compare(e1, e2, e1Kind)
+	if !isComparable {
+		return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+	}
+
+	if res != 1 && res != 0 {
+		return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...)
+	}
+
+	return true
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
new file mode 100644
index 00000000..bdd81389
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -0,0 +1,1626 @@
+package assert
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"os"
+	"reflect"
+	"regexp"
+	"runtime"
+	"runtime/debug"
+	"strings"
+	"time"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/pmezard/go-difflib/difflib"
+	yaml "gopkg.in/yaml.v2"
+)
+
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
+
+// TestingT is an interface wrapper around *testing.T
+type TestingT interface {
+	Errorf(format string, args ...interface{})
+}
+
+// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful
+// for table driven tests.
+type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool
+
+// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful
+// for table driven tests.
+type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool
+
+// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful
+// for table driven tests.
+type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
+
+// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful
+// for table driven tests.
+type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
+
+// Comparison is a custom function that returns true on success and false on failure
+type Comparison func() (success bool)
+
+/*
+	Helper functions
+*/
+
+// ObjectsAreEqual determines if two objects are considered equal.
+//
+// This function does no assertion of any kind.
+func ObjectsAreEqual(expected, actual interface{}) bool {
+	if expected == nil || actual == nil {
+		return expected == actual
+	}
+
+	exp, ok := expected.([]byte)
+	if !ok {
+		return reflect.DeepEqual(expected, actual)
+	}
+
+	act, ok := actual.([]byte)
+	if !ok {
+		return false
+	}
+	if exp == nil || act == nil {
+		return exp == nil && act == nil
+	}
+	return bytes.Equal(exp, act)
+}
+
+// ObjectsAreEqualValues gets whether two objects are equal, or if their
+// values are equal.
+func ObjectsAreEqualValues(expected, actual interface{}) bool {
+	if ObjectsAreEqual(expected, actual) {
+		return true
+	}
+
+	actualType := reflect.TypeOf(actual)
+	if actualType == nil {
+		return false
+	}
+	expectedValue := reflect.ValueOf(expected)
+	if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
+		// Attempt comparison after type conversion
+		return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
+	}
+
+	return false
+}
+
+/* CallerInfo is necessary because the assert functions use the testing object
+internally, causing it to print the file:line of the assert method, rather than where
+the problem actually occurred in calling code.*/
+
+// CallerInfo returns an array of strings containing the file and line number
+// of each stack frame leading from the current test to the assert call that
+// failed.
+func CallerInfo() []string {
+
+	pc := uintptr(0)
+	file := ""
+	line := 0
+	ok := false
+	name := ""
+
+	callers := []string{}
+	for i := 0; ; i++ {
+		pc, file, line, ok = runtime.Caller(i)
+		if !ok {
+			// The breaks below failed to terminate the loop, and we ran off the
+			// end of the call stack.
+			break
+		}
+
+		// This is a huge edge case, but it will panic if this is the case, see #180
+		if file == "<autogenerated>" {
+			break
+		}
+
+		f := runtime.FuncForPC(pc)
+		if f == nil {
+			break
+		}
+		name = f.Name()
+
+		// testing.tRunner is the standard library function that calls
+		// tests. Subtests are called directly by tRunner, without going through
+		// the Test/Benchmark/Example function that contains the t.Run calls, so
+		// with subtests we should break when we hit tRunner, without adding it
+		// to the list of callers.
+		if name == "testing.tRunner" {
+			break
+		}
+
+		parts := strings.Split(file, "/")
+		file = parts[len(parts)-1]
+		if len(parts) > 1 {
+			dir := parts[len(parts)-2]
+			if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
+				callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+			}
+		}
+
+		// Drop the package
+		segments := strings.Split(name, ".")
+		name = segments[len(segments)-1]
+		if isTest(name, "Test") ||
+			isTest(name, "Benchmark") ||
+			isTest(name, "Example") {
+			break
+		}
+	}
+
+	return callers
+}
+
+// Stolen from the `go test` tool.
+// isTest tells whether name looks like a test (or benchmark, according to prefix).
+// It is a Test (say) if there is a character after Test that is not a lower-case letter.
+// We don't want TesticularCancer.
+func isTest(name, prefix string) bool {
+	if !strings.HasPrefix(name, prefix) {
+		return false
+	}
+	if len(name) == len(prefix) { // "Test" is ok
+		return true
+	}
+	rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
+	return !unicode.IsLower(rune)
+}
+
+func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
+	if len(msgAndArgs) == 0 || msgAndArgs == nil {
+		return ""
+	}
+	if len(msgAndArgs) == 1 {
+		msg := msgAndArgs[0]
+		if msgAsStr, ok := msg.(string); ok {
+			return msgAsStr
+		}
+		return fmt.Sprintf("%+v", msg)
+	}
+	if len(msgAndArgs) > 1 {
+		return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
+	}
+	return ""
+}
+
+// Aligns the provided message so that all lines after the first line start at the same location as the first line.
+// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
+// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the
+// basis on which the alignment occurs).
+func indentMessageLines(message string, longestLabelLen int) string {
+	outBuf := new(bytes.Buffer)
+
+	for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {
+		// no need to align first line because it starts at the correct location (after the label)
+		if i != 0 {
+			// append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab
+			outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t")
+		}
+		outBuf.WriteString(scanner.Text())
+	}
+
+	return outBuf.String()
+}
+
+type failNower interface {
+	FailNow()
+}
+
+// FailNow fails test
+func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	Fail(t, failureMessage, msgAndArgs...)
+
+	// We cannot extend TestingT with FailNow() and
+	// maintain backwards compatibility, so we fallback
+	// to panicking when FailNow is not available in
+	// TestingT.
+	// See issue #263
+
+	if t, ok := t.(failNower); ok {
+		t.FailNow()
+	} else {
+		panic("test failed and t is missing `FailNow()`")
+	}
+	return false
+}
+
+// Fail reports a failure through
+func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	content := []labeledContent{
+		{"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")},
+		{"Error", failureMessage},
+	}
+
+	// Add test name if the Go version supports it
+	if n, ok := t.(interface {
+		Name() string
+	}); ok {
+		content = append(content, labeledContent{"Test", n.Name()})
+	}
+
+	message := messageFromMsgAndArgs(msgAndArgs...)
+	if len(message) > 0 {
+		content = append(content, labeledContent{"Messages", message})
+	}
+
+	t.Errorf("\n%s", ""+labeledOutput(content...))
+
+	return false
+}
+
+type labeledContent struct {
+	label   string
+	content string
+}
+
+// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner:
+//
+// \t{{label}}:{{align_spaces}}\t{{content}}\n
+//
+// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label.
+// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this
+// alignment is achieved, "\t{{content}}\n" is added for the output.
+//
+// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line.
+func labeledOutput(content ...labeledContent) string {
+	longestLabel := 0
+	for _, v := range content {
+		if len(v.label) > longestLabel {
+			longestLabel = len(v.label)
+		}
+	}
+	var output string
+	for _, v := range content {
+		output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n"
+	}
+	return output
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	interfaceType := reflect.TypeOf(interfaceObject).Elem()
+
+	if object == nil {
+		return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...)
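Since TestingT is only the Errorf surface, the Fail/labeledOutput path above can be driven by a minimal stub; a sketch (recordingT is illustrative, not part of this package):

	package main

	import (
		"fmt"

		"github.com/stretchr/testify/assert"
	)

	// recordingT is a minimal assert.TestingT stub that captures
	// failure output instead of failing a real test.
	type recordingT struct {
		messages []string
	}

	func (r *recordingT) Errorf(format string, args ...interface{}) {
		r.messages = append(r.messages, fmt.Sprintf(format, args...))
	}

	func main() {
		r := &recordingT{}
		assert.Equal(r, 1, 2) // records the labeled failure output
		fmt.Println(len(r.messages), "failure(s) recorded")
	}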
+ } + if !reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) + } + + return true +} + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { + return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) + } + + return true +} + +// Equal asserts that two objects are equal. +// +// assert.Equal(t, 123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if err := validateEqualArgs(expected, actual); err != nil { + return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)", + expected, actual, err), msgAndArgs...) + } + + if !ObjectsAreEqual(expected, actual) { + diff := diff(expected, actual) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: \n"+ + "expected: %s\n"+ + "actual : %s%s", expected, actual, diff), msgAndArgs...) + } + + return true + +} + +// validateEqualArgs checks whether provided arguments can be safely used in the +// Equal/NotEqual functions. +func validateEqualArgs(expected, actual interface{}) error { + if expected == nil && actual == nil { + return nil + } + + if isFunction(expected) || isFunction(actual) { + return errors.New("cannot take func type as argument") + } + return nil +} + +// Same asserts that two pointers reference the same object. +// +// assert.Same(t, ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if !samePointers(expected, actual) { + return Fail(t, fmt.Sprintf("Not same: \n"+ + "expected: %p %#v\n"+ + "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) + } + + return true +} + +// NotSame asserts that two pointers do not reference the same object. +// +// assert.NotSame(t, ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if samePointers(expected, actual) { + return Fail(t, fmt.Sprintf( + "Expected and actual point to the same object: %p %#v", + expected, expected), msgAndArgs...) 
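A worked contrast between the value equality used by Equal and the pointer sameness used by Same/NotSame, assuming a plain *testing.T:

	package example

	import (
		"testing"

		"github.com/stretchr/testify/assert"
	)

	func TestEqualVersusSame(t *testing.T) {
		x, y := 5, 5
		p, q := &x, &y

		assert.Equal(t, p, q)   // passes: compares the referenced values
		assert.NotSame(t, p, q) // passes: different addresses
		assert.Same(t, p, p)    // passes: identical pointer
	}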
+	}
+	return true
+}
+
+// samePointers compares two generic interface objects and returns whether
+// they point to the same object
+func samePointers(first, second interface{}) bool {
+	firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
+	if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
+		return false
+	}
+
+	firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
+	if firstType != secondType {
+		return false
+	}
+
+	// compare pointer addresses
+	return first == second
+}
+
+// formatUnequalValues takes two values of arbitrary types and returns string
+// representations appropriate to be presented to the user.
+//
+// If the values are not of like type, the returned strings will be prefixed
+// with the type name, and the value will be enclosed in parenthesis similar
+// to a type conversion in the Go grammar.
+func formatUnequalValues(expected, actual interface{}) (e string, a string) {
+	if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
+		return fmt.Sprintf("%T(%#v)", expected, expected),
+			fmt.Sprintf("%T(%#v)", actual, actual)
+	}
+	switch expected.(type) {
+	case time.Duration:
+		return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual)
+	}
+	return fmt.Sprintf("%#v", expected), fmt.Sprintf("%#v", actual)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValues(t, uint32(123), int32(123))
+func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if !ObjectsAreEqualValues(expected, actual) {
+		diff := diff(expected, actual)
+		expected, actual = formatUnequalValues(expected, actual)
+		return Fail(t, fmt.Sprintf("Not equal: \n"+
+			"expected: %s\n"+
+			"actual  : %s%s", expected, actual, diff), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// assert.Exactly(t, int32(123), int64(123))
+func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	aType := reflect.TypeOf(expected)
+	bType := reflect.TypeOf(actual)
+
+	if aType != bType {
+		return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
+	}
+
+	return Equal(t, expected, actual, msgAndArgs...)
+
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// assert.NotNil(t, err)
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if !isNil(object) {
+		return true
+	}
+	return Fail(t, "Expected value not to be nil.", msgAndArgs...)
+}
+
+// containsKind checks if a specified kind is in the slice of kinds.
+func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
+	for i := 0; i < len(kinds); i++ {
+		if kind == kinds[i] {
+			return true
+		}
+	}
+
+	return false
+}
+
+// isNil checks if a specified object is nil or not, without Failing.
+func isNil(object interface{}) bool {
+	if object == nil {
+		return true
+	}
+
+	value := reflect.ValueOf(object)
+	kind := value.Kind()
+	isNilableKind := containsKind(
+		[]reflect.Kind{
+			reflect.Chan, reflect.Func,
+			reflect.Interface, reflect.Map,
+			reflect.Ptr, reflect.Slice},
+		kind)
+
+	if isNilableKind && value.IsNil() {
+		return true
+	}
+
+	return false
+}
+
+// Nil asserts that the specified object is nil.
+//
+// assert.Nil(t, err)
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if isNil(object) {
+		return true
+	}
+	return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
+}
+
+// isEmpty gets whether the specified object is considered empty or not.
+func isEmpty(object interface{}) bool {
+
+	// get nil case out of the way
+	if object == nil {
+		return true
+	}
+
+	objValue := reflect.ValueOf(object)
+
+	switch objValue.Kind() {
+	// collection types are empty when they have no element
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+		return objValue.Len() == 0
+	// pointers are empty if nil or if the value they point to is empty
+	case reflect.Ptr:
+		if objValue.IsNil() {
+			return true
+		}
+		deref := objValue.Elem().Interface()
+		return isEmpty(deref)
+	// for all other types, compare against the zero value
+	default:
+		zero := reflect.Zero(objValue.Type())
+		return reflect.DeepEqual(object, zero.Interface())
+	}
+}
+
+// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// assert.Empty(t, obj)
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	pass := isEmpty(object)
+	if !pass {
+		Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
+	}
+
+	return pass
+
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if assert.NotEmpty(t, obj) {
+// assert.Equal(t, "two", obj[1])
+// }
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	pass := !isEmpty(object)
+	if !pass {
+		Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
+	}
+
+	return pass
+
+}
+
+// getLen tries to get the length of an object.
+// It returns (false, 0) if that is impossible.
+func getLen(x interface{}) (ok bool, length int) {
+	v := reflect.ValueOf(x)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+		}
+	}()
+	return true, v.Len()
+}
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+// assert.Len(t, mySlice, 3)
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	ok, l := getLen(object)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
+	}
+
+	if l != length {
+		return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+	}
+	return true
+}
+
+// True asserts that the specified value is true.
+//
+// assert.True(t, myBool)
+func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if value != true {
+		return Fail(t, "Should be true", msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// False asserts that the specified value is false.
+//
+// assert.False(t, myBool)
+func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if value != false {
+		return Fail(t, "Should be false", msgAndArgs...)
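A short sketch of the emptiness rules isEmpty implements above: nil, zero values, zero-length collections, and pointers to empty values all count as empty (assuming a plain *testing.T).

	package example

	import (
		"testing"

		"github.com/stretchr/testify/assert"
	)

	func TestEmptiness(t *testing.T) {
		assert.Empty(t, "")                  // zero value of string
		assert.Empty(t, []int{})             // zero-length slice
		assert.Empty(t, map[string]int(nil)) // nil map has length 0
		s := ""
		assert.Empty(t, &s) // pointer is dereferenced, then checked

		assert.NotEmpty(t, []int{1})
		assert.Len(t, []int{1, 2}, 2) // Len fails for types len() does not accept
	}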
+	}
+
+	return true
+
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// assert.NotEqual(t, obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if err := validateEqualArgs(expected, actual); err != nil {
+		return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)",
+			expected, actual, err), msgAndArgs...)
+	}
+
+	if ObjectsAreEqual(expected, actual) {
+		return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// includeElement loops over the list (array, slice, map or string) and checks
+// whether the list includes the element.
+// It returns (false, false) if iteration is impossible.
+// It returns (true, false) if the element was not found.
+// It returns (true, true) if the element was found.
+func includeElement(list interface{}, element interface{}) (ok, found bool) {
+
+	listValue := reflect.ValueOf(list)
+	listKind := reflect.TypeOf(list).Kind()
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+			found = false
+		}
+	}()
+
+	if listKind == reflect.String {
+		elementValue := reflect.ValueOf(element)
+		return true, strings.Contains(listValue.String(), elementValue.String())
+	}
+
+	if listKind == reflect.Map {
+		mapKeys := listValue.MapKeys()
+		for i := 0; i < len(mapKeys); i++ {
+			if ObjectsAreEqual(mapKeys[i].Interface(), element) {
+				return true, true
+			}
+		}
+		return true, false
+	}
+
+	for i := 0; i < listValue.Len(); i++ {
+		if ObjectsAreEqual(listValue.Index(i).Interface(), element) {
+			return true, true
+		}
+	}
+	return true, false
+
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Contains(t, "Hello World", "World")
+// assert.Contains(t, ["Hello", "World"], "World")
+// assert.Contains(t, {"Hello": "World"}, "Hello")
+func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	ok, found := includeElement(s, contains)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+	}
+	if !found {
+		return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContains(t, "Hello World", "Earth")
+// assert.NotContains(t, ["Hello", "World"], "Earth")
+// assert.NotContains(t, {"Hello": "World"}, "Earth")
+func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	ok, found := includeElement(s, contains)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+	}
+	if found {
+		return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if subset == nil {
+		return true // we consider nil to be equal to the nil set
+	}
+
+	subsetValue := reflect.ValueOf(subset)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+		}
+	}()
+
+	listKind := reflect.TypeOf(list).Kind()
+	subsetKind := reflect.TypeOf(subset).Kind()
+
+	if listKind != reflect.Array && listKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+	}
+
+	if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+	}
+
+	for i := 0; i < subsetValue.Len(); i++ {
+		element := subsetValue.Index(i).Interface()
+		ok, found := includeElement(list, element)
+		if !ok {
+			return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+		}
+		if !found {
+			return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
+		}
+	}
+
+	return true
+}
+
+// NotSubset asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if subset == nil {
+		return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...)
+	}
+
+	subsetValue := reflect.ValueOf(subset)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+		}
+	}()
+
+	listKind := reflect.TypeOf(list).Kind()
+	subsetKind := reflect.TypeOf(subset).Kind()
+
+	if listKind != reflect.Array && listKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+	}
+
+	if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+	}
+
+	for i := 0; i < subsetValue.Len(); i++ {
+		element := subsetValue.Index(i).Interface()
+		ok, found := includeElement(list, element)
+		if !ok {
+			return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+		}
+		if !found {
+			return true
+		}
+	}
+
+	return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])
+func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if isEmpty(listA) && isEmpty(listB) {
+		return true
+	}
+
+	aKind := reflect.TypeOf(listA).Kind()
+	bKind := reflect.TypeOf(listB).Kind()
+
+	if aKind != reflect.Array && aKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...)
+	}
+
+	if bKind != reflect.Array && bKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...)
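A sketch of the containment assertions above, including the duplicate-counting behavior of ElementsMatch (assuming a plain *testing.T):

	package example

	import (
		"testing"

		"github.com/stretchr/testify/assert"
	)

	func TestContainment(t *testing.T) {
		assert.Subset(t, []int{1, 2, 3}, []int{1, 3})    // every subset element found
		assert.NotSubset(t, []int{1, 3, 4}, []int{1, 2}) // 2 is missing from the list

		// Order is ignored, but each element must appear the same
		// number of times in both lists.
		assert.ElementsMatch(t, []int{1, 3, 2, 3}, []int{1, 3, 3, 2})
	}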
+ } + + aValue := reflect.ValueOf(listA) + bValue := reflect.ValueOf(listB) + + aLen := aValue.Len() + bLen := bValue.Len() + + if aLen != bLen { + return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...) + } + + // Mark indexes in bValue that we already used + visited := make([]bool, bLen) + for i := 0; i < aLen; i++ { + element := aValue.Index(i).Interface() + found := false + for j := 0; j < bLen; j++ { + if visited[j] { + continue + } + if ObjectsAreEqual(bValue.Index(j).Interface(), element) { + visited[j] = true + found = true + break + } + } + if !found { + return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...) + } + } + + return true +} + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + result := comp() + if !result { + Fail(t, "Condition failed!", msgAndArgs...) + } + return result +} + +// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics +// methods, and represents a simple func that takes no arguments, and returns nothing. +type PanicTestFunc func() + +// didPanic returns true if the function passed to it panics. Otherwise, it returns false. +func didPanic(f PanicTestFunc) (bool, interface{}, string) { + + didPanic := false + var message interface{} + var stack string + func() { + + defer func() { + if message = recover(); message != nil { + didPanic = true + stack = string(debug.Stack()) + } + }() + + // call the target function + f() + + }() + + return didPanic, message, stack + +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ GoCrazy() }) +func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if funcDidPanic, panicValue, _ := didPanic(f); !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) + } + + return true +} + +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + funcDidPanic, panicValue, panickedStack := didPanic(f) + if !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) + } + if panicValue != expected { + return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, expected, panicValue, panickedStack), msgAndArgs...) + } + + return true +} + +// PanicsWithError asserts that the code inside the specified PanicTestFunc +// panics, and that the recovered panic value is an error that satisfies the +// EqualError comparison. +// +// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) +func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + funcDidPanic, panicValue, panickedStack := didPanic(f) + if !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) 
+ } + panicErr, ok := panicValue.(error) + if !ok || panicErr.Error() != errString { + return Fail(t, fmt.Sprintf("func %#v should panic with error message:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, errString, panicValue, panickedStack), msgAndArgs...) + } + + return true +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ RemainCalm() }) +func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if funcDidPanic, panicValue, panickedStack := didPanic(f); funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v\n\tPanic stack:\t%s", f, panicValue, panickedStack), msgAndArgs...) + } + + return true +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + dt := expected.Sub(actual) + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +func toFloat(x interface{}) (float64, bool) { + var xf float64 + xok := true + + switch xn := x.(type) { + case uint8: + xf = float64(xn) + case uint16: + xf = float64(xn) + case uint32: + xf = float64(xn) + case uint64: + xf = float64(xn) + case int: + xf = float64(xn) + case int8: + xf = float64(xn) + case int16: + xf = float64(xn) + case int32: + xf = float64(xn) + case int64: + xf = float64(xn) + case float32: + xf = float64(xn) + case float64: + xf = float64(xn) + case time.Duration: + xf = float64(xn) + default: + xok = false + } + + return xf, xok +} + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, 22/7.0, 0.01) +func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + af, aok := toFloat(expected) + bf, bok := toFloat(actual) + + if !aok || !bok { + return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + } + + if math.IsNaN(af) { + return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + } + + if math.IsNaN(bf) { + return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) + } + + dt := af - bf + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) + } + + return true +} + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...) 
+ if !result { + return result + } + } + + return true +} + +// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. +func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Map || + reflect.TypeOf(expected).Kind() != reflect.Map { + return Fail(t, "Arguments must be maps", msgAndArgs...) + } + + expectedMap := reflect.ValueOf(expected) + actualMap := reflect.ValueOf(actual) + + if expectedMap.Len() != actualMap.Len() { + return Fail(t, "Arguments must have the same number of keys", msgAndArgs...) + } + + for _, k := range expectedMap.MapKeys() { + ev := expectedMap.MapIndex(k) + av := actualMap.MapIndex(k) + + if !ev.IsValid() { + return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...) + } + + if !av.IsValid() { + return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...) + } + + if !InDelta( + t, + ev.Interface(), + av.Interface(), + delta, + msgAndArgs..., + ) { + return false + } + } + + return true +} + +func calcRelativeError(expected, actual interface{}) (float64, error) { + af, aok := toFloat(expected) + if !aok { + return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + } + if af == 0 { + return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") + } + bf, bok := toFloat(actual) + if !bok { + return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) + } + + return math.Abs(af-bf) / math.Abs(af), nil +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + actualEpsilon, err := calcRelativeError(expected, actual) + if err != nil { + return Fail(t, err.Error(), msgAndArgs...) + } + if actualEpsilon > epsilon { + return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ + " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) + } + + return true +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if expected == nil || actual == nil || + reflect.TypeOf(actual).Kind() != reflect.Slice || + reflect.TypeOf(expected).Kind() != reflect.Slice { + return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + } + + actualSlice := reflect.ValueOf(actual) + expectedSlice := reflect.ValueOf(expected) + + for i := 0; i < actualSlice.Len(); i++ { + result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) + if !result { + return result + } + } + + return true +} + +/* + Errors +*/ + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, expectedObj, actualObj) +// } +func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if err != nil { + return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) 
+ } + + return true +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err) { +// assert.Equal(t, expectedError, err) +// } +func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if err == nil { + return Fail(t, "An error is expected but got nil.", msgAndArgs...) + } + + return true +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualError(t, err, expectedErrorString) +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !Error(t, theError, msgAndArgs...) { + return false + } + expected := errString + actual := theError.Error() + // don't need to use deep equals here, we know they are both strings + if expected != actual { + return Fail(t, fmt.Sprintf("Error message not equal:\n"+ + "expected: %q\n"+ + "actual : %q", expected, actual), msgAndArgs...) + } + return true +} + +// matchRegexp return true if a specified regexp matches a string. +func matchRegexp(rx interface{}, str interface{}) bool { + + var r *regexp.Regexp + if rr, ok := rx.(*regexp.Regexp); ok { + r = rr + } else { + r = regexp.MustCompile(fmt.Sprint(rx)) + } + + return (r.FindStringIndex(fmt.Sprint(str)) != nil) + +} + +// Regexp asserts that a specified regexp matches a string. +// +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") +func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + match := matchRegexp(rx, str) + + if !match { + Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) + } + + return match +} + +// NotRegexp asserts that a specified regexp does not match a string. +// +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") +func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + match := matchRegexp(rx, str) + + if match { + Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) + } + + return !match + +} + +// Zero asserts that i is the zero value for its type. +func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// NotZero asserts that i is not the zero value for its type. +func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// FileExists checks whether a file exists in the given path. It also fails if +// the path points to a directory or there is an error when trying to check the file. 
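+//
+// assert.FileExists(t, "/tmp/output.txt")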
+func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + info, err := os.Lstat(path) + if err != nil { + if os.IsNotExist(err) { + return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) + } + return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) + } + if info.IsDir() { + return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...) + } + return true +} + +// NoFileExists checks whether a file does not exist in a given path. It fails +// if the path points to an existing _file_ only. +func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + info, err := os.Lstat(path) + if err != nil { + return true + } + if info.IsDir() { + return true + } + return Fail(t, fmt.Sprintf("file %q exists", path), msgAndArgs...) +} + +// DirExists checks whether a directory exists in the given path. It also fails +// if the path is a file rather a directory or there is an error checking whether it exists. +func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + info, err := os.Lstat(path) + if err != nil { + if os.IsNotExist(err) { + return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) + } + return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) + } + if !info.IsDir() { + return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...) + } + return true +} + +// NoDirExists checks whether a directory does not exist in the given path. +// It fails if the path points to an existing _directory_ only. +func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + info, err := os.Lstat(path) + if err != nil { + if os.IsNotExist(err) { + return true + } + return true + } + if !info.IsDir() { + return true + } + return Fail(t, fmt.Sprintf("directory %q exists", path), msgAndArgs...) +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + var expectedJSONAsInterface, actualJSONAsInterface interface{} + + if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) + } + + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) + } + + return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) +} + +// YAMLEq asserts that two YAML strings are equivalent. +func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + var expectedYAMLAsInterface, actualYAMLAsInterface interface{} + + if err := yaml.Unmarshal([]byte(expected), &expectedYAMLAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) 
+ } + + if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) + } + + return Equal(t, expectedYAMLAsInterface, actualYAMLAsInterface, msgAndArgs...) +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice, array or string. Otherwise it returns an empty string. +func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { + return "" + } + + var e, a string + if et != reflect.TypeOf("") { + e = spewConfig.Sdump(expected) + a = spewConfig.Sdump(actual) + } else { + e = reflect.ValueOf(expected).String() + a = reflect.ValueOf(actual).String() + } + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return "\n\nDiff:\n" + diff +} + +func isFunction(arg interface{}) bool { + if arg == nil { + return false + } + return reflect.TypeOf(arg).Kind() == reflect.Func +} + +var spewConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, +} + +type tHelper interface { + Helper() +} + +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + ch := make(chan bool, 1) + + timer := time.NewTimer(waitFor) + defer timer.Stop() + + ticker := time.NewTicker(tick) + defer ticker.Stop() + + for tick := ticker.C; ; { + select { + case <-timer.C: + return Fail(t, "Condition never satisfied", msgAndArgs...) + case <-tick: + tick = nil + go func() { ch <- condition() }() + case v := <-ch: + if v { + return true + } + tick = ticker.C + } + } +} + +// Never asserts that the given condition doesn't satisfy in waitFor time, +// periodically checking the target function each tick. +// +// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) +func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + ch := make(chan bool, 1) + + timer := time.NewTimer(waitFor) + defer timer.Stop() + + ticker := time.NewTicker(tick) + defer ticker.Stop() + + for tick := ticker.C; ; { + select { + case <-timer.C: + return true + case <-tick: + tick = nil + go func() { ch <- condition() }() + case v := <-ch: + if v { + return Fail(t, "Condition satisfied", msgAndArgs...) 
+ } + tick = ticker.C + } + } +} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go new file mode 100644 index 00000000..c9dccc4d --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -0,0 +1,45 @@ +// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. +// +// Example Usage +// +// The following is a complete example using assert in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// if you assert many times, use the format below: +// +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// assert := assert.New(t) +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(a, b, "The two words should be the same.") +// } +// +// Assertions +// +// Assertions allow you to easily write test code, and are global funcs in the `assert` package. +// All assertion functions take, as the first argument, the `*testing.T` object provided by the +// testing framework. This allows the assertion funcs to write the failings and other details to +// the correct place. +// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. +package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go new file mode 100644 index 00000000..ac9dc9d1 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/errors.go @@ -0,0 +1,10 @@ +package assert + +import ( + "errors" +) + +// AnError is an error instance useful for testing. If the code does not care +// about error specifics, and only needs to return the error for example, this +// error should be used to make the test code more readable. +var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go new file mode 100644 index 00000000..df189d23 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go @@ -0,0 +1,16 @@ +package assert + +// Assertions provides assertion methods around the +// TestingT interface. +type Assertions struct { + t TestingT +} + +// New makes a new Assertions object for the specified TestingT. +func New(t TestingT) *Assertions { + return &Assertions{ + t: t, + } +} + +//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs" diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go new file mode 100644 index 00000000..df46fa77 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -0,0 +1,143 @@ +package assert + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" +) + +// httpCode is a helper that returns HTTP code of the response. It returns -1 and +// an error if building a new request fails. 
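+// The given values are URL-encoded into the request's query string.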
+func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url, nil) + if err != nil { + return -1, err + } + req.URL.RawQuery = values.Encode() + handler(w, req) + return w.Code, nil +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent + if !isSuccessCode { + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isSuccessCode +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect + if !isRedirectCode { + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isRedirectCode +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + code, err := httpCode(handler, method, url, values) + if err != nil { + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false + } + + isErrorCode := code >= http.StatusBadRequest + if !isErrorCode { + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + } + + return isErrorCode +} + +// HTTPBody is a helper that returns HTTP body of the response. It returns +// empty string if building a new request fails. +func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if err != nil { + return "" + } + handler(w, req) + return w.Body.String() +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). 
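+// The body is read via HTTPBody, so a request that cannot be built is
+// treated as an empty body.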
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ body := HTTPBody(handler, method, url, values)
+
+ contains := strings.Contains(body, fmt.Sprint(str))
+ if !contains {
+ Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+ }
+
+ return contains
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ body := HTTPBody(handler, method, url, values)
+
+ contains := strings.Contains(body, fmt.Sprint(str))
+ if contains {
+ Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+ }
+
+ return !contains
+}
diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go
new file mode 100644
index 00000000..169de392
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/require/doc.go
@@ -0,0 +1,28 @@
+// Package require implements the same assertions as the `assert` package but
+// stops test execution when a test fails.
+//
+// Example Usage
+//
+// The following is a complete example using require in a standard test function:
+// import (
+// "testing"
+// "github.com/stretchr/testify/require"
+// )
+//
+// func TestSomething(t *testing.T) {
+//
+// var a string = "Hello"
+// var b string = "Hello"
+//
+// require.Equal(t, a, b, "The two words should be the same.")
+//
+// }
+//
+// Assertions
+//
+// The `require` package has the same global functions as the `assert` package,
+// but instead of returning a boolean result they call `t.FailNow()`.
+//
+// Every assertion function also takes an optional string message as the final argument,
+// allowing custom error messages to be appended to the message the assertion method outputs.
+package require
diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements.go b/vendor/github.com/stretchr/testify/require/forward_requirements.go
new file mode 100644
index 00000000..1dcb2338
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/require/forward_requirements.go
@@ -0,0 +1,16 @@
+package require
+
+// Assertions provides assertion methods around the
+// TestingT interface.
+type Assertions struct {
+ t TestingT
+}
+
+// New makes a new Assertions object for the specified TestingT.
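+//
+// r := require.New(t)
+// r.Equal(expected, actual)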
+func New(t TestingT) *Assertions { + return &Assertions{ + t: t, + } +} + +//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require_forward.go.tmpl -include-format-funcs" diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go new file mode 100644 index 00000000..cf6c7b56 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -0,0 +1,1575 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND + */ + +package require + +import ( + assert "github.com/stretchr/testify/assert" + http "net/http" + url "net/url" + time "time" +) + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Condition(t, comp, msgAndArgs...) { + return + } + t.FailNow() +} + +// Conditionf uses a Comparison to assert a complex condition. +func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Conditionf(t, comp, msg, args...) { + return + } + t.FailNow() +} + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Contains(t, "Hello World", "World") +// assert.Contains(t, ["Hello", "World"], "World") +// assert.Contains(t, {"Hello": "World"}, "Hello") +func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Contains(t, s, contains, msgAndArgs...) { + return + } + t.FailNow() +} + +// Containsf asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Containsf(t, s, contains, msg, args...) { + return + } + t.FailNow() +} + +// DirExists checks whether a directory exists in the given path. It also fails +// if the path is a file rather a directory or there is an error checking whether it exists. +func DirExists(t TestingT, path string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.DirExists(t, path, msgAndArgs...) { + return + } + t.FailNow() +} + +// DirExistsf checks whether a directory exists in the given path. It also fails +// if the path is a file rather a directory or there is an error checking whether it exists. +func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.DirExistsf(t, path, msg, args...) { + return + } + t.FailNow() +} + +// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. 
+// +// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ElementsMatch(t, listA, listB, msgAndArgs...) { + return + } + t.FailNow() +} + +// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should match. +// +// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.ElementsMatchf(t, listA, listB, msg, args...) { + return + } + t.FailNow() +} + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Empty(t, obj) +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Empty(t, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Emptyf(t, obj, "error message %s", "formatted") +func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Emptyf(t, object, msg, args...) { + return + } + t.FailNow() +} + +// Equal asserts that two objects are equal. +// +// assert.Equal(t, 123, 123) +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Equal(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualError(t, err, expectedErrorString) +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.EqualError(t, theError, errString, msgAndArgs...) { + return + } + t.FailNow() +} + +// EqualErrorf asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.EqualErrorf(t, theError, errString, msg, args...) { + return + } + t.FailNow() +} + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValues(t, uint32(123), int32(123)) +func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.EqualValues(t, expected, actual, msgAndArgs...) 
{ + return + } + t.FailNow() +} + +// EqualValuesf asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) +func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.EqualValuesf(t, expected, actual, msg, args...) { + return + } + t.FailNow() +} + +// Equalf asserts that two objects are equal. +// +// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). Function equality +// cannot be determined and will always fail. +func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Equalf(t, expected, actual, msg, args...) { + return + } + t.FailNow() +} + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err) { +// assert.Equal(t, expectedError, err) +// } +func Error(t TestingT, err error, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Error(t, err, msgAndArgs...) { + return + } + t.FailNow() +} + +// Errorf asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Errorf(t, err, "error message %s", "formatted") { +// assert.Equal(t, expectedErrorf, err) +// } +func Errorf(t TestingT, err error, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Errorf(t, err, msg, args...) { + return + } + t.FailNow() +} + +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Eventually(t, condition, waitFor, tick, msgAndArgs...) { + return + } + t.FailNow() +} + +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Eventuallyf(t, condition, waitFor, tick, msg, args...) { + return + } + t.FailNow() +} + +// Exactly asserts that two objects are equal in value and type. +// +// assert.Exactly(t, int32(123), int64(123)) +func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Exactly(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// Exactlyf asserts that two objects are equal in value and type. 
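+// Values that are equal but of different types (for example int32(123) and
+// int64(123)) fail this assertion.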
+// +// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) +func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Exactlyf(t, expected, actual, msg, args...) { + return + } + t.FailNow() +} + +// Fail reports a failure through +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Fail(t, failureMessage, msgAndArgs...) { + return + } + t.FailNow() +} + +// FailNow fails test +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.FailNow(t, failureMessage, msgAndArgs...) { + return + } + t.FailNow() +} + +// FailNowf fails test +func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.FailNowf(t, failureMessage, msg, args...) { + return + } + t.FailNow() +} + +// Failf reports a failure through +func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Failf(t, failureMessage, msg, args...) { + return + } + t.FailNow() +} + +// False asserts that the specified value is false. +// +// assert.False(t, myBool) +func False(t TestingT, value bool, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.False(t, value, msgAndArgs...) { + return + } + t.FailNow() +} + +// Falsef asserts that the specified value is false. +// +// assert.Falsef(t, myBool, "error message %s", "formatted") +func Falsef(t TestingT, value bool, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Falsef(t, value, msg, args...) { + return + } + t.FailNow() +} + +// FileExists checks whether a file exists in the given path. It also fails if +// the path points to a directory or there is an error when trying to check the file. +func FileExists(t TestingT, path string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.FileExists(t, path, msgAndArgs...) { + return + } + t.FailNow() +} + +// FileExistsf checks whether a file exists in the given path. It also fails if +// the path points to a directory or there is an error when trying to check the file. +func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.FileExistsf(t, path, msg, args...) { + return + } + t.FailNow() +} + +// Greater asserts that the first element is greater than the second +// +// assert.Greater(t, 2, 1) +// assert.Greater(t, float64(2), float64(1)) +// assert.Greater(t, "b", "a") +func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Greater(t, e1, e2, msgAndArgs...) { + return + } + t.FailNow() +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqual(t, 2, 1) +// assert.GreaterOrEqual(t, 2, 2) +// assert.GreaterOrEqual(t, "b", "a") +// assert.GreaterOrEqual(t, "b", "b") +func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.GreaterOrEqual(t, e1, e2, msgAndArgs...) 
{ + return + } + t.FailNow() +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.GreaterOrEqualf(t, e1, e2, msg, args...) { + return + } + t.FailNow() +} + +// Greaterf asserts that the first element is greater than the second +// +// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1)) +// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Greaterf(t, e1, e2, msg, args...) { + return + } + t.FailNow() +} + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) { + return + } + t.FailNow() +} + +// HTTPBodyContainsf asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) { + return + } + t.FailNow() +} + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) { + return + } + t.FailNow() +} + +// HTTPBodyNotContainsf asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). 
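+// The str argument is rendered with fmt.Sprint before the body is searched.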
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) { + return + } + t.FailNow() +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPError(t, handler, method, url, values, msgAndArgs...) { + return + } + t.FailNow() +} + +// HTTPErrorf asserts that a specified handler returns an error status code. +// +// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPErrorf(t, handler, method, url, values, msg, args...) { + return + } + t.FailNow() +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPRedirect(t, handler, method, url, values, msgAndArgs...) { + return + } + t.FailNow() +} + +// HTTPRedirectf asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). +func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPRedirectf(t, handler, method, url, values, msg, args...) { + return + } + t.FailNow() +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.HTTPSuccess(t, handler, method, url, values, msgAndArgs...) { + return + } + t.FailNow() +} + +// HTTPSuccessf asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// +// Returns whether the assertion was successful (true) or not (false). 
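+// A success code is any status from 200 up to and including 206.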
+func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.HTTPSuccessf(t, handler, method, url, values, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
+// Implements asserts that the specified object implements the given interface.
+//
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.Implements(t, interfaceObject, object, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// Implementsf asserts that the specified object implements the given interface.
+//
+// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.Implementsf(t, interfaceObject, object, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
+func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.InDelta(t, expected, actual, delta, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValues(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.InDeltaMapValues(t, expected, actual, delta, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.InDeltaMapValuesf(t, expected, actual, delta, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.InDeltaSlicef(t, expected, actual, delta, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.InDeltaf(t, expected, actual, delta, msg, args...)
{ + return + } + t.FailNow() +} + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { + return + } + t.FailNow() +} + +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) { + return + } + t.FailNow() +} + +// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.InEpsilonSlicef(t, expected, actual, epsilon, msg, args...) { + return + } + t.FailNow() +} + +// InEpsilonf asserts that expected and actual have a relative error less than epsilon +func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.InEpsilonf(t, expected, actual, epsilon, msg, args...) { + return + } + t.FailNow() +} + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsType(t, expectedType, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// IsTypef asserts that the specified objects are of the same type. +func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsTypef(t, expectedType, object, msg, args...) { + return + } + t.FailNow() +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.JSONEq(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.JSONEqf(t, expected, actual, msg, args...) { + return + } + t.FailNow() +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(t, mySlice, 3) +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Len(t, object, length, msgAndArgs...) { + return + } + t.FailNow() +} + +// Lenf asserts that the specified object has specific length. +// Lenf also fails if the object has a type that len() not accept. 
+//
+// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Lenf(t, object, length, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Less asserts that the first element is less than the second
+//
+// assert.Less(t, 1, 2)
+// assert.Less(t, float64(1), float64(2))
+// assert.Less(t, "a", "b")
+func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Less(t, e1, e2, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// LessOrEqual asserts that the first element is less than or equal to the second
+//
+// assert.LessOrEqual(t, 1, 2)
+// assert.LessOrEqual(t, 2, 2)
+// assert.LessOrEqual(t, "a", "b")
+// assert.LessOrEqual(t, "b", "b")
+func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.LessOrEqual(t, e1, e2, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// LessOrEqualf asserts that the first element is less than or equal to the second
+//
+// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
+// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
+// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
+// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
+func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.LessOrEqualf(t, e1, e2, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Lessf asserts that the first element is less than the second
+//
+// assert.Lessf(t, 1, 2, "error message %s", "formatted")
+// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
+// assert.Lessf(t, "a", "b", "error message %s", "formatted")
+func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Lessf(t, e1, e2, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Never asserts that the given condition is never satisfied within the waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
+func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Never(t, condition, waitFor, tick, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Neverf asserts that the given condition is never satisfied within the waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Neverf(t, condition, waitFor, tick, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Nil asserts that the specified object is nil.
+//
+// assert.Nil(t, err)
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Nil(t, object, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// assert.Nilf(t, err, "error message %s", "formatted")
+func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Nilf(t, object, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NoDirExists(t, path, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NoDirExistsf(t, path, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoError(t, err) {
+//	assert.Equal(t, expectedObj, actualObj)
+// }
+func NoError(t TestingT, err error, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NoError(t, err, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoErrorf(t, err, "error message %s", "formatted") {
+//	assert.Equal(t, expectedObj, actualObj)
+// }
+func NoErrorf(t TestingT, err error, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NoErrorf(t, err, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NoFileExists(t, path, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NoFileExistsf(t, path, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContains(t, "Hello World", "Earth")
+// assert.NotContains(t, ["Hello", "World"], "Earth")
+// assert.NotContains(t, {"Hello": "World"}, "Earth")
+func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotContains(t, s, contains, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotContainsf(t, s, contains, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// if assert.NotEmpty(t, obj) {
+//	assert.Equal(t, "two", obj[1])
+// }
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotEmpty(t, object, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
+//	assert.Equal(t, "two", obj[1])
+// }
+func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotEmptyf(t, object, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// assert.NotEqual(t, obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotEqual(t, expected, actual, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotEqualf(t, expected, actual, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// assert.NotNil(t, err)
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotNil(t, object, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// assert.NotNilf(t, err, "error message %s", "formatted")
+func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotNilf(t, object, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanics(t, func(){ RemainCalm() })
+func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotPanics(t, f, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
+func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotPanicsf(t, f, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+// assert.NotRegexp(t, "^start", "it's not starting")
+func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotRegexp(t, rx, str, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
+func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotRegexpf(t, rx, str, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotSame asserts that two pointers do not reference the same object.
+//
+// assert.NotSame(t, ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotSame(t, expected, actual, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotSamef(t, expected, actual, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotSubset asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotSubset(t, list, subset, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotSubsetf(t, list, subset, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotZero(t, i, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// NotZerof asserts that i is not the zero value for its type.
+func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.NotZerof(t, i, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panics(t, func(){ GoCrazy() })
+func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Panics(t, f, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
+func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.PanicsWithError(t, errString, f, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.PanicsWithErrorf(t, errString, f, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
+func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.PanicsWithValue(t, expected, f, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.PanicsWithValuef(t, expected, f, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
+func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Panicsf(t, f, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+// assert.Regexp(t, "start...$", "it's not starting")
+func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Regexp(t, rx, str, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
+func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Regexpf(t, rx, str, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Same asserts that two pointers reference the same object.
+//
+// assert.Same(t, ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Same(t, expected, actual, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Samef asserts that two pointers reference the same object.
+//
+// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Samef(t, expected, actual, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Subset(t, list, subset, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Subsetf(t, list, subset, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// True asserts that the specified value is true.
+//
+// assert.True(t, myBool)
+func True(t TestingT, value bool, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.True(t, value, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Truef asserts that the specified value is true.
+//
+// assert.Truef(t, myBool, "error message %s", "formatted")
+func Truef(t TestingT, value bool, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Truef(t, value, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
+func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.WithinDurationf(t, expected, actual, delta, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// YAMLEq asserts that two YAML strings are equivalent.
+func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.YAMLEq(t, expected, actual, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
+func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.YAMLEqf(t, expected, actual, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Zero asserts that i is the zero value for its type.
+func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Zero(t, i, msgAndArgs...) {
+		return
+	}
+	t.FailNow()
+}
+
+// Zerof asserts that i is the zero value for its type.
+func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if assert.Zerof(t, i, msg, args...) {
+		return
+	}
+	t.FailNow()
+}
diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl
new file mode 100644
index 00000000..55e42dde
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl
@@ -0,0 +1,6 @@
+{{.Comment}}
+func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
+	if h, ok := t.(tHelper); ok { h.Helper() }
+	if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return }
+	t.FailNow()
+}
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go
new file mode 100644
index 00000000..5aac226d
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/require/require_forward.go
@@ -0,0 +1,1233 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package require
+
+import (
+	assert "github.com/stretchr/testify/assert"
+	http "net/http"
+	url "net/url"
+	time "time"
+)
+
+// Condition uses a Comparison to assert a complex condition.
+func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Condition(a.t, comp, msgAndArgs...)
+}
+
+// Conditionf uses a Comparison to assert a complex condition.
+func (a *Assertions) Conditionf(comp assert.Comparison, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Conditionf(a.t, comp, msg, args...)
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Contains("Hello World", "World")
+// a.Contains(["Hello", "World"], "World")
+// a.Contains({"Hello": "World"}, "Hello")
+func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Contains(a.t, s, contains, msgAndArgs...)
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Containsf("Hello World", "World", "error message %s", "formatted")
+// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
+// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
+func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Containsf(a.t, s, contains, msg, args...)
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather than a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	DirExists(a.t, path, msgAndArgs...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather than a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	DirExistsf(a.t, path, msg, args...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2])
+func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	ElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	ElementsMatchf(a.t, listA, listB, msg, args...)
+}
+
+// Empty asserts that the specified object is empty. I.e. nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// a.Empty(obj)
+func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Empty(a.t, object, msgAndArgs...)
+}
+
+// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// a.Emptyf(obj, "error message %s", "formatted")
+func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Emptyf(a.t, object, msg, args...)
+}
+
+// Equal asserts that two objects are equal.
+//
+// a.Equal(123, 123)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Equal(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualError(err, expectedErrorString)
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	EqualError(a.t, theError, errString, msgAndArgs...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
+func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	EqualErrorf(a.t, theError, errString, msg, args...)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValues(uint32(123), int32(123))
+func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	EqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	EqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// a.Equalf(123, 123, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Equalf(a.t, expected, actual, msg, args...)
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Error(err) {
+//	assert.Equal(t, expectedError, err)
+// }
+func (a *Assertions) Error(err error, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Error(a.t, err, msgAndArgs...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Errorf(err, "error message %s", "formatted") {
+//	assert.Equal(t, expectedErrorf, err)
+// }
+func (a *Assertions) Errorf(err error, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Errorf(a.t, err, msg, args...)
+}
+
+// Eventually asserts that the given condition will be met within the waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond)
+func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Eventually(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// Eventuallyf asserts that the given condition will be met within the waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Eventuallyf(a.t, condition, waitFor, tick, msg, args...)
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// a.Exactly(int32(123), int64(123))
+func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Exactly(a.t, expected, actual, msgAndArgs...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
+func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Exactlyf(a.t, expected, actual, msg, args...)
+}
+
+// Fail reports a failure.
+func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Fail(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNow fails the test.
+func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	FailNow(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNowf fails the test.
+func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	FailNowf(a.t, failureMessage, msg, args...)
+}
+
+// Failf reports a failure.
+func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Failf(a.t, failureMessage, msg, args...)
+}
+
+// False asserts that the specified value is false.
+//
+// a.False(myBool)
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	False(a.t, value, msgAndArgs...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+// a.Falsef(myBool, "error message %s", "formatted")
+func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Falsef(a.t, value, msg, args...)
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	FileExists(a.t, path, msgAndArgs...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	FileExistsf(a.t, path, msg, args...)
+}
+
+// Greater asserts that the first element is greater than the second
+//
+// a.Greater(2, 1)
+// a.Greater(float64(2), float64(1))
+// a.Greater("b", "a")
+func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Greater(a.t, e1, e2, msgAndArgs...)
+}
+
+// GreaterOrEqual asserts that the first element is greater than or equal to the second
+//
+// a.GreaterOrEqual(2, 1)
+// a.GreaterOrEqual(2, 2)
+// a.GreaterOrEqual("b", "a")
+// a.GreaterOrEqual("b", "b")
+func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	GreaterOrEqual(a.t, e1, e2, msgAndArgs...)
+}
+
+// GreaterOrEqualf asserts that the first element is greater than or equal to the second
+//
+// a.GreaterOrEqualf(2, 1, "error message %s", "formatted")
+// a.GreaterOrEqualf(2, 2, "error message %s", "formatted")
+// a.GreaterOrEqualf("b", "a", "error message %s", "formatted")
+// a.GreaterOrEqualf("b", "b", "error message %s", "formatted")
+func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	GreaterOrEqualf(a.t, e1, e2, msg, args...)
+}
+
+// Greaterf asserts that the first element is greater than the second
+//
+// a.Greaterf(2, 1, "error message %s", "formatted")
+// a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
+// a.Greaterf("b", "a", "error message %s", "formatted")
+func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Greaterf(a.t, e1, e2, msg, args...)
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	HTTPError(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	HTTPErrorf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+// a.Implements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Implements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// Implementsf asserts that an object implements the specified interface.
+//
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Implementsf(a.t, interfaceObject, object, msg, args...)
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// a.InDelta(math.Pi, 22/7.0, 0.01)
+func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	InDelta(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	InDeltaf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// IsType asserts that the specified objects are of the same type.
+func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	IsType(a.t, expectedType, object, msgAndArgs...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	IsTypef(a.t, expectedType, object, msg, args...)
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	JSONEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	JSONEqf(a.t, expected, actual, msg, args...)
+}
+
+// Len asserts that the specified object has a specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+// a.Len(mySlice, 3)
+func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Len(a.t, object, length, msgAndArgs...)
+}
+
+// Lenf asserts that the specified object has a specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// a.Lenf(mySlice, 3, "error message %s", "formatted")
+func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Lenf(a.t, object, length, msg, args...)
+}
+
+// Less asserts that the first element is less than the second
+//
+// a.Less(1, 2)
+// a.Less(float64(1), float64(2))
+// a.Less("a", "b")
+func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Less(a.t, e1, e2, msgAndArgs...)
+}
+
+// LessOrEqual asserts that the first element is less than or equal to the second
+//
+// a.LessOrEqual(1, 2)
+// a.LessOrEqual(2, 2)
+// a.LessOrEqual("a", "b")
+// a.LessOrEqual("b", "b")
+func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	LessOrEqual(a.t, e1, e2, msgAndArgs...)
+}
+
+// LessOrEqualf asserts that the first element is less than or equal to the second
+//
+// a.LessOrEqualf(1, 2, "error message %s", "formatted")
+// a.LessOrEqualf(2, 2, "error message %s", "formatted")
+// a.LessOrEqualf("a", "b", "error message %s", "formatted")
+// a.LessOrEqualf("b", "b", "error message %s", "formatted")
+func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	LessOrEqualf(a.t, e1, e2, msg, args...)
+}
+
+// Lessf asserts that the first element is less than the second
+//
+// a.Lessf(1, 2, "error message %s", "formatted")
+// a.Lessf(float64(1), float64(2), "error message %s", "formatted")
+// a.Lessf("a", "b", "error message %s", "formatted")
+func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Lessf(a.t, e1, e2, msg, args...)
+}
+
+// Never asserts that the given condition is never satisfied within the waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
+func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Never(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// Neverf asserts that the given condition is never satisfied within the waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Neverf(a.t, condition, waitFor, tick, msg, args...)
+}
+
+// Nil asserts that the specified object is nil.
+//
+// a.Nil(err)
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Nil(a.t, object, msgAndArgs...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// a.Nilf(err, "error message %s", "formatted")
+func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Nilf(a.t, object, msg, args...)
+}
+
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NoDirExists(a.t, path, msgAndArgs...)
+}
+
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NoDirExistsf(a.t, path, msg, args...)
+}
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoError(err) {
+//	assert.Equal(t, expectedObj, actualObj)
+// }
+func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NoError(a.t, err, msgAndArgs...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoErrorf(err, "error message %s", "formatted") {
+//	assert.Equal(t, expectedObj, actualObj)
+// }
+func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NoErrorf(a.t, err, msg, args...)
+}
+
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NoFileExists(a.t, path, msgAndArgs...)
+}
+
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NoFileExistsf(a.t, path, msg, args...)
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContains("Hello World", "Earth")
+// a.NotContains(["Hello", "World"], "Earth")
+// a.NotContains({"Hello": "World"}, "Earth")
+func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotContains(a.t, s, contains, msgAndArgs...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
+// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
+// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
+func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotContainsf(a.t, s, contains, msg, args...)
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// if a.NotEmpty(obj) {
+//	assert.Equal(t, "two", obj[1])
+// }
+func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotEmpty(a.t, object, msgAndArgs...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// if a.NotEmptyf(obj, "error message %s", "formatted") {
+//	assert.Equal(t, "two", obj[1])
+// }
+func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotEmptyf(a.t, object, msg, args...)
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// a.NotEqual(obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotEqual(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotEqualf(a.t, expected, actual, msg, args...)
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// a.NotNil(err)
+func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotNil(a.t, object, msgAndArgs...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// a.NotNilf(err, "error message %s", "formatted")
+func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotNilf(a.t, object, msg, args...)
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanics(func(){ RemainCalm() })
+func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotPanics(a.t, f, msgAndArgs...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+func (a *Assertions) NotPanicsf(f assert.PanicTestFunc, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotPanicsf(a.t, f, msg, args...)
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+// a.NotRegexp("^start", "it's not starting")
+func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotRegexp(a.t, rx, str, msgAndArgs...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotRegexpf(a.t, rx, str, msg, args...)
+}
+
+// NotSame asserts that two pointers do not reference the same object.
+//
+// a.NotSame(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotSame(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	NotSamef(a.t, expected, actual, msg, args...)
+}
+
+// NotSubset asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+// +// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotSubset(a.t, list, subset, msgAndArgs...) +} + +// NotSubsetf asserts that the specified list(array, slice...) contains not all +// elements given in the specified subset(array, slice...). +// +// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotSubsetf(a.t, list, subset, msg, args...) +} + +// NotZero asserts that i is not the zero value for its type. +func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotZero(a.t, i, msgAndArgs...) +} + +// NotZerof asserts that i is not the zero value for its type. +func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotZerof(a.t, i, msg, args...) +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panics(func(){ GoCrazy() }) +func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Panics(a.t, f, msgAndArgs...) +} + +// PanicsWithError asserts that the code inside the specified PanicTestFunc +// panics, and that the recovered panic value is an error that satisfies the +// EqualError comparison. +// +// a.PanicsWithError("crazy error", func(){ GoCrazy() }) +func (a *Assertions) PanicsWithError(errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + PanicsWithError(a.t, errString, f, msgAndArgs...) +} + +// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc +// panics, and that the recovered panic value is an error that satisfies the +// EqualError comparison. +// +// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) PanicsWithErrorf(errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + PanicsWithErrorf(a.t, errString, f, msg, args...) +} + +// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) +func (a *Assertions) PanicsWithValue(expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + PanicsWithValue(a.t, expected, f, msgAndArgs...) +} + +// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that +// the recovered panic value equals the expected panic value. +// +// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +func (a *Assertions) PanicsWithValuef(expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + PanicsWithValuef(a.t, expected, f, msg, args...) +} + +// Panicsf asserts that the code inside the specified PanicTestFunc panics. 
+//
+//     a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Panicsf(a.t, f, msg, args...)
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+//     a.Regexp(regexp.MustCompile("start"), "it's starting")
+//     a.Regexp("start...$", "it's not starting")
+func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Regexp(a.t, rx, str, msgAndArgs...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+//     a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+//     a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Regexpf(a.t, rx, str, msg, args...)
+}
+
+// Same asserts that two pointers reference the same object.
+//
+//     a.Same(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Same(a.t, expected, actual, msgAndArgs...)
+}
+
+// Samef asserts that two pointers reference the same object.
+//
+//     a.Samef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Samef(a.t, expected, actual, msg, args...)
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+//     a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Subset(a.t, list, subset, msgAndArgs...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+//     a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Subsetf(a.t, list, subset, msg, args...)
+}
+
+// True asserts that the specified value is true.
+//
+//     a.True(myBool)
+func (a *Assertions) True(value bool, msgAndArgs ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	True(a.t, value, msgAndArgs...)
+}
+
+// Truef asserts that the specified value is true.
+//
+//     a.Truef(myBool, "error message %s", "formatted")
+func (a *Assertions) Truef(value bool, msg string, args ...interface{}) {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	Truef(a.t, value, msg, args...)
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + +// WithinDurationf asserts that the two times are within duration delta of each other. +// +// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + WithinDurationf(a.t, expected, actual, delta, msg, args...) +} + +// YAMLEq asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + YAMLEq(a.t, expected, actual, msgAndArgs...) +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + YAMLEqf(a.t, expected, actual, msg, args...) +} + +// Zero asserts that i is the zero value for its type. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Zero(a.t, i, msgAndArgs...) +} + +// Zerof asserts that i is the zero value for its type. +func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Zerof(a.t, i, msg, args...) +} diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl new file mode 100644 index 00000000..54124df1 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl @@ -0,0 +1,5 @@ +{{.CommentWithoutT "a"}} +func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) { + if h, ok := a.t.(tHelper); ok { h.Helper() } + {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) +} diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go new file mode 100644 index 00000000..91772dfe --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -0,0 +1,29 @@ +package require + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Errorf(format string, args ...interface{}) + FailNow() +} + +type tHelper interface { + Helper() +} + +// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful +// for table driven tests. +type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) + +// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful +// for table driven tests. +type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) + +// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful +// for table driven tests. +type BoolAssertionFunc func(TestingT, bool, ...interface{}) + +// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful +// for table driven tests. 
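+//
+// A hedged sketch of how such a func type can drive a table-driven test
+// (ParseCount and the sample inputs below are hypothetical, not part of
+// this package):
+//
+//     tests := []struct {
+//         in      string
+//         wantErr require.ErrorAssertionFunc
+//     }{
+//         {"42", require.NoError},
+//         {"not-a-number", require.Error},
+//     }
+//     for _, tt := range tests {
+//         _, err := ParseCount(tt.in)
+//         tt.wantErr(t, err)
+//     }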
+type ErrorAssertionFunc func(TestingT, error, ...interface{})
+
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require.go.tmpl -include-format-funcs" diff --git a/vendor/github.com/stretchr/testify/suite/doc.go b/vendor/github.com/stretchr/testify/suite/doc.go new file mode 100644 index 00000000..f91a245d --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/doc.go @@ -0,0 +1,65 @@
+// Package suite contains logic for creating testing suite structs
+// and running the methods on those structs as tests. The most useful
+// piece of this package is that you can create setup/teardown methods
+// on your testing suites, which will run before/after the whole suite
+// or individual tests (depending on which interface(s) you
+// implement).
+//
+// A testing suite is usually built by first extending the built-in
+// suite functionality from suite.Suite in testify. Alternatively,
+// you could reproduce that logic on your own if you wanted (you
+// just need to implement the TestingSuite interface from
+// suite/interfaces.go).
+//
+// After that, you can implement any of the interfaces in
+// suite/interfaces.go to add setup/teardown functionality to your
+// suite, and add any methods that start with "Test" to add tests.
+// Methods that do not match any suite interfaces and do not begin
+// with "Test" will not be run by testify, and can safely be used as
+// helper methods.
+//
+// Once you've built your testing suite, you need to run the suite
+// (using suite.Run from testify) inside any function that matches the
+// identity that "go test" is already looking for (i.e.
+// func(*testing.T)).
+//
+// A regular expression passed via the "-run" command-line argument
+// selects which test suites run, and one passed via the "-m"
+// command-line argument selects which methods of those suites run.
+// The Suite object also provides assertion methods.
+//
+// A crude example:
+//     // Basic imports
+//     import (
+//         "testing"
+//         "github.com/stretchr/testify/assert"
+//         "github.com/stretchr/testify/suite"
+//     )
+//
+//     // Define the suite, and absorb the built-in basic suite
+//     // functionality from testify - including a T() method which
+//     // returns the current testing context
+//     type ExampleTestSuite struct {
+//         suite.Suite
+//         VariableThatShouldStartAtFive int
+//     }
+//
+//     // Make sure that VariableThatShouldStartAtFive is set to five
+//     // before each test
+//     func (suite *ExampleTestSuite) SetupTest() {
+//         suite.VariableThatShouldStartAtFive = 5
+//     }
+//
+//     // All methods that begin with "Test" are run as tests within a
+//     // suite.
+//     func (suite *ExampleTestSuite) TestExample() {
+//         assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive)
+//         suite.Equal(5, suite.VariableThatShouldStartAtFive)
+//     }
+//
+//     // In order for 'go test' to run this suite, we need to create
+//     // a normal test function and pass our suite to suite.Run
+//     func TestExampleTestSuite(t *testing.T) {
+//         suite.Run(t, new(ExampleTestSuite))
+//     }
+package suite diff --git a/vendor/github.com/stretchr/testify/suite/interfaces.go b/vendor/github.com/stretchr/testify/suite/interfaces.go new file mode 100644 index 00000000..b37cb040 --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/interfaces.go @@ -0,0 +1,46 @@
+package suite
+
+import "testing"
+
+// TestingSuite can store and return the current *testing.T context
+// generated by 'go test'.
+type TestingSuite interface { + T() *testing.T + SetT(*testing.T) +} + +// SetupAllSuite has a SetupSuite method, which will run before the +// tests in the suite are run. +type SetupAllSuite interface { + SetupSuite() +} + +// SetupTestSuite has a SetupTest method, which will run before each +// test in the suite. +type SetupTestSuite interface { + SetupTest() +} + +// TearDownAllSuite has a TearDownSuite method, which will run after +// all the tests in the suite have been run. +type TearDownAllSuite interface { + TearDownSuite() +} + +// TearDownTestSuite has a TearDownTest method, which will run after +// each test in the suite. +type TearDownTestSuite interface { + TearDownTest() +} + +// BeforeTest has a function to be executed right before the test +// starts and receives the suite and test names as input +type BeforeTest interface { + BeforeTest(suiteName, testName string) +} + +// AfterTest has a function to be executed right after the test +// finishes and receives the suite and test names as input +type AfterTest interface { + AfterTest(suiteName, testName string) +} diff --git a/vendor/github.com/stretchr/testify/suite/suite.go b/vendor/github.com/stretchr/testify/suite/suite.go new file mode 100644 index 00000000..61953018 --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/suite.go @@ -0,0 +1,171 @@ +package suite + +import ( + "flag" + "fmt" + "os" + "reflect" + "regexp" + "runtime/debug" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var allTestsFilter = func(_, _ string) (bool, error) { return true, nil } +var matchMethod = flag.String("testify.m", "", "regular expression to select tests of the testify suite to run") + +// Suite is a basic testing suite with methods for storing and +// retrieving the current *testing.T context. +type Suite struct { + *assert.Assertions + require *require.Assertions + t *testing.T +} + +// T retrieves the current *testing.T context. +func (suite *Suite) T() *testing.T { + return suite.t +} + +// SetT sets the current *testing.T context. +func (suite *Suite) SetT(t *testing.T) { + suite.t = t + suite.Assertions = assert.New(t) + suite.require = require.New(t) +} + +// Require returns a require context for suite. +func (suite *Suite) Require() *require.Assertions { + if suite.require == nil { + suite.require = require.New(suite.T()) + } + return suite.require +} + +// Assert returns an assert context for suite. Normally, you can call +// `suite.NoError(expected, actual)`, but for situations where the embedded +// methods are overridden (for example, you might want to override +// assert.Assertions with require.Assertions), this method is provided so you +// can call `suite.Assert().NoError()`. +func (suite *Suite) Assert() *assert.Assertions { + if suite.Assertions == nil { + suite.Assertions = assert.New(suite.T()) + } + return suite.Assertions +} + +func failOnPanic(t *testing.T) { + r := recover() + if r != nil { + t.Errorf("test panicked: %v\n%s", r, debug.Stack()) + t.FailNow() + } +} + +// Run provides suite functionality around golang subtests. It should be +// called in place of t.Run(name, func(t *testing.T)) in test suite code. +// The passed-in func will be executed as a subtest with a fresh instance of t. +// Provides compatibility with go test pkg -run TestSuite/TestName/SubTestName. 
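+//
+// A hedged sketch of a subtest inside a suite method, reusing the
+// ExampleTestSuite from the package documentation:
+//
+//     func (suite *ExampleTestSuite) TestSubtests() {
+//         suite.Run("starts-at-five", func() {
+//             suite.Equal(5, suite.VariableThatShouldStartAtFive)
+//         })
+//     }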
+func (suite *Suite) Run(name string, subtest func()) bool { + oldT := suite.T() + defer suite.SetT(oldT) + return oldT.Run(name, func(t *testing.T) { + suite.SetT(t) + subtest() + }) +} + +// Run takes a testing suite and runs all of the tests attached +// to it. +func Run(t *testing.T, suite TestingSuite) { + testsSync := &sync.WaitGroup{} + suite.SetT(t) + defer failOnPanic(t) + + suiteSetupDone := false + + methodFinder := reflect.TypeOf(suite) + tests := []testing.InternalTest{} + for index := 0; index < methodFinder.NumMethod(); index++ { + method := methodFinder.Method(index) + ok, err := methodFilter(method.Name) + if err != nil { + fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err) + os.Exit(1) + } + if !ok { + continue + } + if !suiteSetupDone { + if setupAllSuite, ok := suite.(SetupAllSuite); ok { + setupAllSuite.SetupSuite() + } + defer func() { + if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok { + testsSync.Wait() + tearDownAllSuite.TearDownSuite() + } + }() + suiteSetupDone = true + } + test := testing.InternalTest{ + Name: method.Name, + F: func(t *testing.T) { + defer testsSync.Done() + parentT := suite.T() + suite.SetT(t) + defer failOnPanic(t) + + if setupTestSuite, ok := suite.(SetupTestSuite); ok { + setupTestSuite.SetupTest() + } + if beforeTestSuite, ok := suite.(BeforeTest); ok { + beforeTestSuite.BeforeTest(methodFinder.Elem().Name(), method.Name) + } + defer func() { + if afterTestSuite, ok := suite.(AfterTest); ok { + afterTestSuite.AfterTest(methodFinder.Elem().Name(), method.Name) + } + if tearDownTestSuite, ok := suite.(TearDownTestSuite); ok { + tearDownTestSuite.TearDownTest() + } + suite.SetT(parentT) + }() + method.Func.Call([]reflect.Value{reflect.ValueOf(suite)}) + }, + } + tests = append(tests, test) + testsSync.Add(1) + } + runTests(t, tests) +} + +func runTests(t testing.TB, tests []testing.InternalTest) { + r, ok := t.(runner) + if !ok { // backwards compatibility with Go 1.6 and below + if !testing.RunTests(allTestsFilter, tests) { + t.Fail() + } + return + } + + for _, test := range tests { + r.Run(test.Name, test.F) + } +} + +// Filtering method according to set regular expression +// specified command-line argument -m +func methodFilter(name string) (bool, error) { + if ok, _ := regexp.MatchString("^Test", name); !ok { + return false, nil + } + return regexp.MatchString(*matchMethod, name) +} + +type runner interface { + Run(name string, f func(t *testing.T)) bool +} diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml new file mode 100644 index 00000000..055480b9 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "tip" + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 00000000..8da58fbf --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE new file mode 100644 index 00000000..866d74a7 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 00000000..b50c6e87 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,133 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. 
+ +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 00000000..1f7e87e6 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. 
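+// (For example, newParser in decode.go calls
+// yaml_parser_set_input_string(&p.parser, b) right after initializing the
+// parser.)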
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+	if parser.read_handler != nil {
+		panic("must set the input source only once")
+	}
+	parser.read_handler = yaml_string_read_handler
+	parser.input = input
+	parser.input_pos = 0
+}
+
+// Set a reader input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+	if parser.read_handler != nil {
+		panic("must set the input source only once")
+	}
+	parser.read_handler = yaml_reader_read_handler
+	parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+	if parser.encoding != yaml_ANY_ENCODING {
+		panic("must set the encoding only once")
+	}
+	parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+	*emitter = yaml_emitter_t{
+		buffer:     make([]byte, output_buffer_size),
+		raw_buffer: make([]byte, 0, output_raw_buffer_size),
+		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
+		events:     make([]yaml_event_t, 0, initial_queue_size),
+	}
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+	*emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+	return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	_, err := emitter.output_writer.Write(buffer)
+	return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+	if emitter.write_handler != nil {
+		panic("must set the output target only once")
+	}
+	emitter.write_handler = yaml_string_write_handler
+	emitter.output_buffer = output_buffer
+}
+
+// Set a writer output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+	if emitter.write_handler != nil {
+		panic("must set the output target only once")
+	}
+	emitter.write_handler = yaml_writer_write_handler
+	emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+	if emitter.encoding != yaml_ANY_ENCODING {
+		panic("must set the output encoding only once")
+	}
+	emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+	emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+	if indent < 2 || indent > 9 {
+		indent = 2
+	}
+	emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+	if width < 0 {
+		width = -1
+	}
+	emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+	emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+	emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+//    assert(token); // Non-NULL token object expected.
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 00000000..129bc2a9 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,815 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
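+//
+// As a rough illustration (added for exposition, not upstream
+// documentation), the document "a: [1, 2]" parses into a tree shaped like:
+//
+//     documentNode
+//       mappingNode
+//         scalarNode "a"    (key)
+//         sequenceNode      (value)
+//           scalarNode "1"
+//           scalarNode "2"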
+
+type parser struct {
+	parser   yaml_parser_t
+	event    yaml_event_t
+	doc      *node
+	doneInit bool
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	yaml_parser_set_input_reader(&p.parser, r)
+	return &p
+}
+
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
+		}
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+	return p.event.typ
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+		// Scanner errors don't iterate line before returning error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	} else if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+	if anchor != nil {
+		p.doc.anchors[string(anchor)] = n
+	}
+}
+
+func (p *parser) parse() *node {
+	p.init()
+	switch p.peek() {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool + + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. 
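+// Pointers are allocated and dereferenced in a loop, so multiply-indirected
+// targets such as **T are initialized transparently before decoding.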
+// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
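+		// For example, "a: &x [*x]" makes the anchored sequence contain
+		// an alias to itself and would recurse without this guard.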
+ failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
+ out.Set(reflect.ValueOf(n.value)) + } else { + out.Set(reflect.ValueOf(resolved)) + } + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
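+		// Decode into a fresh []interface{} and assign it to the
+		// interface once the elements are in place (see iface.Set below).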
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + if n.alias != nil && n.alias.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + if ni.alias != nil && ni.alias.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 00000000..a1c2cc52 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. 
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? 
+ tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
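+// This must be the first event of every stream; it pins down the encoding,
+// indentation width, line width and line-break style before any output.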
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
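+// Short keys (see yaml_emitter_check_simple_key) are written inline as
+// "key: value"; anything else falls back to the explicit "? key" form.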
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
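+// The first key also increases the indentation level; MAPPING-END pops both
+// the saved indent and the previous emitter state.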
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
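+// Flow style wins when we are already inside a flow collection, in canonical
+// mode, when the event asks for it, or when the sequence is empty.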
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
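+// The requested style is only a hint: plain scalars are downgraded to single
+// quotes when the analyzed value does not allow them in the current context,
+// and block styles are rejected inside flow collections and simple keys.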
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
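+// Analysis records which styles the value can legally use (plain in flow or
+// block context, single-quoted, block) and whether it spans multiple lines;
+// yaml_emitter_select_scalar_style consumes these flags above.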
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
+ preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. +func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
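+// Note this always writes the UTF-8 byte order mark (0xEF 0xBB 0xBF); it is
+// only invoked when the chosen encoding is not plain UTF-8.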
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = 
false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if 
!put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 00000000..0ee738e1 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,390 @@ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + 
"regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case jsonNumber: + integer, err := m.Int64() + if err == nil { + // In this case the json.Number is a valid int64 + in = reflect.ValueOf(integer) + break + } + float, err := m.Float64() + if err == nil { + // In this case the json.Number is a valid float64 + in = reflect.ValueOf(float) + break + } + // fallback case - no number could be obtained + in = reflect.ValueOf(m.String()) + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
+ case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 00000000..81d05dfe --- /dev/null +++ 
b/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
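Before the dispatcher that follows, it may help to see how these pieces are meant to be driven: yaml_parser_parse is a pull-style event pump, and a caller loops until the stream-end event. A hypothetical helper (drainEvents is not part of the upstream file, and the parser is assumed to be initialized elsewhere, e.g. via yaml_parser_set_input_string from this package):

// drainEvents is a hypothetical helper, not part of the upstream file: it
// pulls events off the state machine until the stream ends or parsing fails.
func drainEvents(parser *yaml_parser_t) bool {
    for {
        var event yaml_event_t
        if !yaml_parser_parse(parser, &event) {
            // parser.problem and parser.problem_mark describe the failure.
            return false
        }
        if event.typ == yaml_STREAM_END_EVENT {
            return true
        }
        // ... handle the event here ...
    }
}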
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+//                          ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+    token := peek_token(parser)
+    if token == nil {
+        return false
+    }
+    if token.typ != yaml_STREAM_START_TOKEN {
+        return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+    }
+    parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+    *event = yaml_event_t{
+        typ:        yaml_STREAM_START_EVENT,
+        start_mark: token.start_mark,
+        end_mark:   token.end_mark,
+        encoding:   token.encoding,
+    }
+    skip_token(parser)
+    return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+    token := peek_token(parser)
+    if token == nil {
+        return false
+    }
+
+    // Parse extra document end indicators.
+    if !implicit {
+        for token.typ == yaml_DOCUMENT_END_TOKEN {
+            skip_token(parser)
+            token = peek_token(parser)
+            if token == nil {
+                return false
+            }
+        }
+    }
+
+    if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+        token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+        token.typ != yaml_DOCUMENT_START_TOKEN &&
+        token.typ != yaml_STREAM_END_TOKEN {
+        // Parse an implicit document.
+        if !yaml_parser_process_directives(parser, nil, nil) {
+            return false
+        }
+        parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+        parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+        *event = yaml_event_t{
+            typ:        yaml_DOCUMENT_START_EVENT,
+            start_mark: token.start_mark,
+            end_mark:   token.end_mark,
+        }
+
+    } else if token.typ != yaml_STREAM_END_TOKEN {
+        // Parse an explicit document.
+        var version_directive *yaml_version_directive_t
+        var tag_directives []yaml_tag_directive_t
+        start_mark := token.start_mark
+        if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+            return false
+        }
+        token = peek_token(parser)
+        if token == nil {
+            return false
+        }
+        if token.typ != yaml_DOCUMENT_START_TOKEN {
+            yaml_parser_set_parser_error(parser,
+                "did not find expected <document start>", token.start_mark)
+            return false
+        }
+        parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+        parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+        end_mark := token.end_mark
+
+        *event = yaml_event_t{
+            typ:               yaml_DOCUMENT_START_EVENT,
+            start_mark:        start_mark,
+            end_mark:          end_mark,
+            version_directive: version_directive,
+            tag_directives:    tag_directives,
+            implicit:          false,
+        }
+        skip_token(parser)
+
+    } else {
+        // Parse the stream end.
+        parser.state = yaml_PARSE_END_STATE
+        *event = yaml_event_t{
+            typ:        yaml_STREAM_END_EVENT,
+            start_mark: token.start_mark,
+            end_mark:   token.end_mark,
+        }
+        skip_token(parser)
+    }
+
+    return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
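Empty scalars are how the parser represents omitted keys and values; the function below constructs them, and through the public API they surface as nil. A small sketch:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var doc map[string]interface{}
	if err := yaml.Unmarshal([]byte("key:\nlist:\n- \n"), &doc); err != nil {
		panic(err)
	}
	fmt.Println(doc["key"])  // <nil>: the missing value became an empty scalar
	fmt.Println(doc["list"]) // [<nil>]: so did the bare "-" entry
}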
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 00000000..7c1f5fac --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
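The constants below feed the BOM check in yaml_parser_determine_encoding further down. The same decision table, as a hypothetical standalone helper (sketch only, not part of the vendored file):

// detectEncoding is a hypothetical standalone mirror of the checks below:
// look at the first bytes, default to UTF-8 when no BOM is present.
func detectEncoding(b []byte) string {
    switch {
    case len(b) >= 3 && b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF:
        return "UTF-8 (explicit BOM)"
    case len(b) >= 2 && b[0] == 0xFF && b[1] == 0xFE:
        return "UTF-16LE"
    case len(b) >= 2 && b[0] == 0xFE && b[1] == 0xFF:
        return "UTF-16BE"
    default:
        return "UTF-8 (assumed)"
    }
}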
+const (
+    bom_UTF8    = "\xef\xbb\xbf"
+    bom_UTF16LE = "\xff\xfe"
+    bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+    // Ensure that we have enough bytes in the raw buffer.
+    for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+        if !yaml_parser_update_raw_buffer(parser) {
+            return false
+        }
+    }
+
+    // Determine the encoding.
+    buf := parser.raw_buffer
+    pos := parser.raw_buffer_pos
+    avail := len(buf) - pos
+    if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+        parser.encoding = yaml_UTF16LE_ENCODING
+        parser.raw_buffer_pos += 2
+        parser.offset += 2
+    } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+        parser.encoding = yaml_UTF16BE_ENCODING
+        parser.raw_buffer_pos += 2
+        parser.offset += 2
+    } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+        parser.encoding = yaml_UTF8_ENCODING
+        parser.raw_buffer_pos += 3
+        parser.offset += 3
+    } else {
+        parser.encoding = yaml_UTF8_ENCODING
+    }
+    return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+    size_read := 0
+
+    // Return if the raw buffer is full.
+    if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+        return true
+    }
+
+    // Return on EOF.
+    if parser.eof {
+        return true
+    }
+
+    // Move the remaining bytes in the raw buffer to the beginning.
+    if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+        copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+    }
+    parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+    parser.raw_buffer_pos = 0
+
+    // Call the read handler to fill the buffer.
+    size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+    parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+    if err == io.EOF {
+        parser.eof = true
+    } else if err != nil {
+        return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+    }
+    return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+    if parser.read_handler == nil {
+        panic("read handler must be set")
+    }
+
+    // [Go] This function was changed to guarantee the requested length size at EOF.
+    // The fact we need to do this is pretty awful, but the description above implies
+    // that this must be the case, and there are tests for it.
+
+    // If the EOF flag is set and the raw buffer is empty, do nothing.
+    if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+        // [Go] ACTUALLY! Read the documentation of this function above.
+        // This is just broken. To return true, we need to have the
+        // given length in the buffer. Not doing that means every single
+        // check that calls this function to make sure the buffer has a
+        // given length is either panicking (in Go) or accessing invalid
+        // memory (in C).
+        //return true
+    }
+
+    // Return if the buffer contains enough characters.
+    if parser.unread >= length {
+        return true
+    }
+
+    // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
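The width-versus-value switch that follows is the overlong-encoding guard; the standard library draws the same line, which makes for an easy cross-check (sketch):

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	// 0xC0 0xAF would be an overlong encoding of '/': a two-byte sequence
	// whose decoded value is below 0x80. Both this decoder and the stdlib
	// reject it.
	r, size := utf8.DecodeRune([]byte{0xC0, 0xAF})
	fmt.Println(r == utf8.RuneError, size) // true 1
}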
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
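After the raw pointers advance, the decoder re-encodes the rune by hand with the 1-to-4-byte ladder below; it should agree byte-for-byte with unicode/utf8. A quick cross-check (sketch):

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	buf := make([]byte, utf8.UTFMax)
	// One rune per width class: 1, 2, 3 and 4 bytes.
	for _, r := range []rune{0x7F, 0x7FF, 0xFFFD, 0x10FFFF} {
		n := utf8.EncodeRune(buf, r)
		fmt.Printf("U+%06X -> %d bytes: % X\n", r, n, buf[:n])
	}
}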
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 00000000..4120e0c9 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
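+				// (For example, the plain scalar 2001-12-15 resolves to a
+				// !!timestamp, while the double-quoted "2001-12-15" keeps
+				// its !!str tag and stays a string.)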
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
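+	// (The loop below scans the leading digit run: "2001-12-14" has four
+	// digits followed by '-', so parsing proceeds, while "12:30:45" stops
+	// at i == 2 and is rejected without calling time.Parse at all.)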
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 00000000..b33bdbae
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2718 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or,
+// an LL(1) parser, as it is usually called).
+//
+// Actually there are two issues of Scanning that might be called "clever"; the
+// rest is quite straightforward. The issues are "block collection start" and
+// "simple keys". Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !       !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' 
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? 
a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? 
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + for i := len(parser.simple_keys) - 1; i >= 0; i-- { + simple_key := &parser.simple_keys[i] + if simple_key.token_number < parser.tokens_parsed { + break + } + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + } else if valid && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? 
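+	// (Note the flow_level check: block scalars only exist in the block
+	// context, so a '|' or '>' inside a flow collection falls through and
+	// is eventually reported as a character that cannot start any token.)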
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. 
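+		// (A required key occurs in the block context at the indentation
+		// column; if its ':' has not appeared within 1024 characters and on
+		// the same line, the document cannot be valid YAML.)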
+ if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. 
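+		// (For example, entering a sequence at column 2 while the enclosing
+		// mapping is at indent 0 pushes 0 onto the stack, makes 2 the new
+		// indent, and inserts a BLOCK-SEQUENCE-START token.)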
+ parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. 
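+// Both indicators are exactly three characters long ('---' or '...'), which
+// is why the function consumes them with three consecutive skip calls.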
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. 
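+		// (For input such as
+		//
+		//     key:
+		//       - item
+		//
+		// the '-' sits at a column deeper than the current indent, so
+		// roll_indent emits BLOCK-SEQUENCE-START before this BLOCK-ENTRY.)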
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. 
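+		// (This is what lets a nested mapping begin on the same line as the
+		// ':' of a complex key:
+		//
+		//     ? complex
+		//     : nested: value
+		// )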
+ parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. 
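+		// Spaces are always eaten; tabs only in the cases spelled out below.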
+ // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. 
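+	// (The break may be CR, LF, CRLF, or a Unicode line break; skip_line
+	// needs two buffered bytes so that a CRLF pair is consumed as a single
+	// break.)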
+ if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! 
tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. 
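+	// (A canonical, verbatim tag has the form '!<tag:yaml.org,2002:str>';
+	// finding '<' right after the '!' is what selects that branch below.)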
+	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+		return false
+	}
+
+	if parser.buffer[parser.buffer_pos+1] == '<' {
+		// Keep the handle as ''
+
+		// Eat '!<'
+		skip(parser)
+		skip(parser)
+
+		// Consume the tag value.
+		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+			return false
+		}
+
+		// Check for '>' and eat it.
+		if parser.buffer[parser.buffer_pos] != '>' {
+			yaml_parser_set_scanner_error(parser, "while scanning a tag",
+				start_mark, "did not find the expected '>'")
+			return false
+		}
+
+		skip(parser)
+	} else {
+		// The tag has either the '!suffix' or the '!handle!suffix' form.
+
+		// First, try to scan a handle.
+		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+			return false
+		}
+
+		// Check if it is, indeed, a handle.
+		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+			// Scan the suffix now.
+			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+				return false
+			}
+		} else {
+			// It wasn't a handle after all. Scan the rest of the tag.
+			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+				return false
+			}
+
+			// Set the handle to '!'.
+			handle = []byte{'!'}
+
+			// A special case: the '!' tag. Set the handle to '' and the
+			// suffix to '!'.
+			if len(suffix) == 0 {
+				handle, suffix = suffix, handle
+			}
+		}
+	}
+
+	// Check the character which ends the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a tag",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_TAG_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      handle,
+		suffix:     suffix,
+	}
+	return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+	// Check the initial '!' character.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if parser.buffer[parser.buffer_pos] != '!' {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected '!'")
+		return false
+	}
+
+	var s []byte
+
+	// Copy the '!' character.
+	s = read(parser, s)
+
+	// Copy all subsequent alphabetical and numerical characters.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the trailing character is '!' and copy it.
+	if parser.buffer[parser.buffer_pos] == '!' {
+		s = read(parser, s)
+	} else {
+		// It's either the '!' tag or not really a tag handle. If it's a %TAG
+		// directive, it's an error. If it's a tag token, it must be part of the URI.
+		if directive && string(s) != "!" {
+			yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find expected '!'")
+			return false
+		}
+	}
+
+	*handle = s
+	return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+	//size_t length = head ? strlen((char *)head) : 0
+	var s []byte
+	hasTag := len(head) > 0
+
+	// Copy the head if needed.
+	//
+	// Note that we don't copy the leading '!' character.
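+	// For example, when the caller passes head "!foo" for the tag
+	// "!foo-bar", only "foo" is copied here and "-bar" is scanned below.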
+	if len(head) > 1 {
+		s = append(s, head[1:]...)
+	}
+
+	// Scan the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// The set of characters that may appear in a URI is as follows:
+	//
+	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+	//      '%'.
+	// [Go] Convert this into more reasonable logic.
+	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+		parser.buffer[parser.buffer_pos] == '%' {
+		// Check if it is a URI-escape sequence.
+		if parser.buffer[parser.buffer_pos] == '%' {
+			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+				return false
+			}
+		} else {
+			s = read(parser, s)
+		}
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		hasTag = true
+	}
+
+	if !hasTag {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected tag URI")
+		return false
+	}
+	*uri = s
+	return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+	// Decode the required number of characters.
+	w := 1024
+	for w > 0 {
+		// Check for a URI-escaped octet.
+		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+			return false
+		}
+
+		if !(parser.buffer[parser.buffer_pos] == '%' &&
+			is_hex(parser.buffer, parser.buffer_pos+1) &&
+			is_hex(parser.buffer, parser.buffer_pos+2)) {
+			return yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find URI escaped octet")
+		}
+
+		// Get the octet.
+		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+		// If it is the leading octet, determine the length of the UTF-8 sequence.
+		if w == 1024 {
+			w = width(octet)
+			if w == 0 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect leading UTF-8 octet")
+			}
+		} else {
+			// Check if the trailing octet is correct.
+			if octet&0xC0 != 0x80 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect trailing UTF-8 octet")
+			}
+		}
+
+		// Copy the octet and move the pointers.
+		*s = append(*s, octet)
+		skip(parser)
+		skip(parser)
+		skip(parser)
+		w--
+	}
+	return true
+}
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+	// Eat the indicator '|' or '>'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the additional block scalar indicators.
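+	// A block scalar header may carry a chomping indicator and an
+	// indentation indicator in either order: e.g. "|+2" keeps trailing
+	// line breaks and fixes the content two columns past the parent
+	// indent, while ">-" folds lines and strips the final line break.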
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check for a chomping indicator.
+	var chomping, increment int
+	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+		// Set the chomping method and eat the indicator.
+		if parser.buffer[parser.buffer_pos] == '+' {
+			chomping = +1
+		} else {
+			chomping = -1
+		}
+		skip(parser)
+
+		// Check for an indentation indicator.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if is_digit(parser.buffer, parser.buffer_pos) {
+			// Check that the indentation is greater than 0.
+			if parser.buffer[parser.buffer_pos] == '0' {
+				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+					start_mark, "found an indentation indicator equal to 0")
+				return false
+			}
+
+			// Get the indentation level and eat the indicator.
+			increment = as_digit(parser.buffer, parser.buffer_pos)
+			skip(parser)
+		}
+
+	} else if is_digit(parser.buffer, parser.buffer_pos) {
+		// Do the same as above, but in the opposite order.
+
+		if parser.buffer[parser.buffer_pos] == '0' {
+			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found an indentation indicator equal to 0")
+			return false
+		}
+		increment = as_digit(parser.buffer, parser.buffer_pos)
+		skip(parser)
+
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+			if parser.buffer[parser.buffer_pos] == '+' {
+				chomping = +1
+			} else {
+				chomping = -1
+			}
+			skip(parser)
+		}
+	}
+
+	// Eat whitespaces and comments to the end of the line.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	end_mark := parser.mark
+
+	// Set the indentation level if it was specified.
+	var indent int
+	if increment > 0 {
+		if parser.indent >= 0 {
+			indent = parser.indent + increment
+		} else {
+			indent = increment
+		}
+	}
+
+	// Scan the leading line breaks and determine the indentation level if needed.
+	var s, leading_break, trailing_breaks []byte
+	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+		return false
+	}
+
+	// Scan the block scalar content.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var leading_blank, trailing_blank bool
+	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+		// We are at the beginning of a non-empty line.
+
+		// Is it a trailing whitespace?
+		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Check if we need to fold the leading line break.
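+		// In a folded scalar a single line break between two non-blank
+		// lines becomes a space; if blank lines follow, the breaks are
+		// kept instead, which is what the branch below implements.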
+		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+			// Do we need to join the lines by space?
+			if len(trailing_breaks) == 0 {
+				s = append(s, ' ')
+			}
+		} else {
+			s = append(s, leading_break...)
+		}
+		leading_break = leading_break[:0]
+
+		// Append the remaining line breaks.
+		s = append(s, trailing_breaks...)
+		trailing_breaks = trailing_breaks[:0]
+
+		// Is it a leading whitespace?
+		leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Consume the current line.
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			s = read(parser, s)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+
+		leading_break = read_line(parser, leading_break)
+
+		// Eat the following indentation spaces and line breaks.
+		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+			return false
+		}
+	}
+
+	// Chomp the tail.
+	if chomping != -1 {
+		s = append(s, leading_break...)
+	}
+	if chomping == 1 {
+		s = append(s, trailing_breaks...)
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_LITERAL_SCALAR_STYLE,
+	}
+	if !literal {
+		token.style = yaml_FOLDED_SCALAR_STYLE
+	}
+	return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+	*end_mark = parser.mark
+
+	// Eat the indentation spaces and line breaks.
+	max_indent := 0
+	for {
+		// Eat the indentation spaces.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing up the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
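+		// A "---" or "..." at column 0 followed by a blank would start or
+		// end a document, so it cannot appear inside a quoted scalar.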
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
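+					// Encode the code point as UTF-8: one byte up to U+007F,
+					// two up to U+07FF, three up to U+FFFF, four beyond;
+					// e.g. \u00E9 is written as the bytes 0xC3 0xA9.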
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+				s = read(parser, s)
+			}
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		// Check if we are at the end of the scalar.
+		if single {
+			if parser.buffer[parser.buffer_pos] == '\'' {
+				break
+			}
+		} else {
+			if parser.buffer[parser.buffer_pos] == '"' {
+				break
+			}
+		}
+
+		// Consume blank characters.
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Join the whitespaces or fold line breaks.
+		if leading_blanks {
+			// Do we need to fold line breaks?
+			if len(leading_break) > 0 && leading_break[0] == '\n' {
+				if len(trailing_breaks) == 0 {
+					s = append(s, ' ')
+				} else {
+					s = append(s, trailing_breaks...)
+				}
+			} else {
+				s = append(s, leading_break...)
+				s = append(s, trailing_breaks...)
+			}
+			trailing_breaks = trailing_breaks[:0]
+			leading_break = leading_break[:0]
+		} else {
+			s = append(s, whitespaces...)
+			whitespaces = whitespaces[:0]
+		}
+	}
+
+	// Eat the right quote.
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
+	}
+	if !single {
+		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	var leading_blanks bool
+	var indent = parser.indent + 1
+
+	start_mark := parser.mark
+	end_mark := parser.mark
+
+	// Consume the content of the plain scalar.
+	for {
+		// Check for a document indicator.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			break
+		}
+
+		// Check for a comment.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			break
+		}
+
+		// Consume non-blank characters.
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+			// Check for indicators that may end a plain scalar.
+			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+				(parser.flow_level > 0 &&
+					(parser.buffer[parser.buffer_pos] == ',' ||
+						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+						parser.buffer[parser.buffer_pos] == '}')) {
+				break
+			}
+
+			// Check if we need to join whitespaces and breaks.
+			if leading_blanks || len(whitespaces) > 0 {
+				if leading_blanks {
+					// Do we need to fold line breaks?
+					if leading_break[0] == '\n' {
+						if len(trailing_breaks) == 0 {
+							s = append(s, ' ')
+						} else {
+							s = append(s, trailing_breaks...)
+						}
+					} else {
+						s = append(s, leading_break...)
+						s = append(s, trailing_breaks...)
+					}
+					trailing_breaks = trailing_breaks[:0]
+					leading_break = leading_break[:0]
+					leading_blanks = false
+				} else {
+					s = append(s, whitespaces...)
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for tab characters that abuse indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
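+	// If the scalar consumed trailing line breaks, the scanner now sits at
+	// the start of a line, where a new simple key may legally begin.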
+	if leading_blanks {
+		parser.simple_key_allowed = true
+	}
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go
new file mode 100644
index 00000000..4c45e660
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/sorter.go
@@ -0,0 +1,113 @@
+package yaml
+
+import (
+	"reflect"
+	"unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int      { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+	a := l[i]
+	b := l[j]
+	ak := a.Kind()
+	bk := b.Kind()
+	for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+		a = a.Elem()
+		ak = a.Kind()
+	}
+	for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+		b = b.Elem()
+		bk = b.Kind()
+	}
+	af, aok := keyFloat(a)
+	bf, bok := keyFloat(b)
+	if aok && bok {
+		if af != bf {
+			return af < bf
+		}
+		if ak != bk {
+			return ak < bk
+		}
+		return numLess(a, b)
+	}
+	if ak != reflect.String || bk != reflect.String {
+		return ak < bk
+	}
+	ar, br := []rune(a.String()), []rune(b.String())
+	for i := 0; i < len(ar) && i < len(br); i++ {
+		if ar[i] == br[i] {
+			continue
+		}
+		al := unicode.IsLetter(ar[i])
+		bl := unicode.IsLetter(br[i])
+		if al && bl {
+			return ar[i] < br[i]
+		}
+		if al || bl {
+			return bl
+		}
+		var ai, bi int
+		var an, bn int64
+		if ar[i] == '0' || br[i] == '0' {
+			for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+				if ar[j] != '0' {
+					an = 1
+					bn = 1
+					break
+				}
+			}
+		}
+		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+			an = an*10 + int64(ar[ai]-'0')
+		}
+		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+			bn = bn*10 + int64(br[bi]-'0')
+		}
+		if an != bn {
+			return an < bn
+		}
+		if ai != bi {
+			return ai < bi
+		}
+		return ar[i] < br[i]
+	}
+	return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number/bool
+// and whether it is a number/bool or not.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return float64(v.Int()), true
+	case reflect.Float32, reflect.Float64:
+		return v.Float(), true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return float64(v.Uint()), true
+	case reflect.Bool:
+		if v.Bool() {
+			return 1, true
+		}
+		return 0, true
+	}
+	return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must necessarily have the same kind.
+func numLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return a.Int() < b.Int()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	}
+	panic("not a number")
+}
diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go
new file mode 100644
index 00000000..a2dde608
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/writerc.go
@@ -0,0 +1,26 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_WRITER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Flush the output buffer.
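+// The emitter typically calls this once its internal buffer is close to
+// full and again when the stream ends; a flush on an empty buffer is a
+// no-op.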
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+	if emitter.write_handler == nil {
+		panic("write handler not set")
+	}
+
+	// Check if the buffer is empty.
+	if emitter.buffer_pos == 0 {
+		return true
+	}
+
+	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 00000000..89650e29
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,466 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+	Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in
+// an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, true)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	strict bool
+	parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+	dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder(dec.strict)
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder(strict)
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be included if that method returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
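+//
+// For example, an inlined map collects any keys that do not match
+// another field (illustrative types, not part of this package):
+//
+//     type T struct {
+//         A     int
+//         Extra map[string]interface{} `yaml:",inline"`
+//     }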
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
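+	// For a field promoted through one level of inlining this is e.g.
+	// []int{1, 0}: index 1 selects the inlined struct in the outer type
+	// and index 0 selects the field within it.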
+	Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	fieldMapMutex.RLock()
+	sinfo, found := structMap[st]
+	fieldMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		tag := field.Tag.Get("yaml")
+		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+			tag = string(field.Tag)
+		}
+		if tag == "-" {
+			continue
+		}
+
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "flow":
+					info.Flow = true
+				case "inline":
+					inline = true
+				default:
+					return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				if inlineMap >= 0 {
+					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Struct:
+				sinfo, err := getStructInfo(field.Type)
+				if err != nil {
+					return nil, err
+				}
+				for _, finfo := range sinfo.FieldsList {
+					if _, found := fieldsMap[finfo.Key]; found {
+						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+						return nil, errors.New(msg)
+					}
+					if finfo.Inline == nil {
+						finfo.Inline = []int{i, finfo.Num}
+					} else {
+						finfo.Inline = append([]int{i}, finfo.Inline...)
+					}
+					finfo.Id = len(fieldsList)
+					fieldsMap[finfo.Key] = finfo
+					fieldsList = append(fieldsList, finfo)
+				}
+			default:
+				//return nil, errors.New("Option ,inline needs a struct value or map field")
+				return nil, errors.New("Option ,inline needs a struct value field")
+			}
+			continue
+		}
+
+		if tag != "" {
+			info.Key = tag
+		} else {
+			info.Key = strings.ToLower(field.Name)
+		}
+
+		if _, found = fieldsMap[info.Key]; found {
+			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+			return nil, errors.New(msg)
+		}
+
+		info.Id = len(fieldsList)
+		fieldsList = append(fieldsList, info)
+		fieldsMap[info.Key] = info
+	}
+
+	sinfo = &structInfo{
+		FieldsMap:  fieldsMap,
+		FieldsList: fieldsList,
+		InlineMap:  inlineMap,
+	}
+
+	fieldMapMutex.Lock()
+	structMap[st] = sinfo
+	fieldMapMutex.Unlock()
+	return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
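+//
+// A minimal illustrative implementation (not part of this package):
+//
+//     type Window struct{ Start, End int }
+//
+//     func (w Window) IsZero() bool { return w.Start == 0 && w.End == 0 }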
+type IsZeroer interface {
+	IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+	kind := v.Kind()
+	if z, ok := v.Interface().(IsZeroer); ok {
+		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+			return true
+		}
+		return z.IsZero()
+	}
+	switch kind {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		for i := v.NumField() - 1; i >= 0; i-- {
+			if vt.Field(i).PkgPath != "" {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 00000000..e25cee56
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,738 @@
+package yaml
+
+import (
+	"fmt"
+	"io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+	major int8 // The major version number.
+	minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+	handle []byte // The tag handle.
+	prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+	// Let the parser choose the encoding.
+	yaml_ANY_ENCODING yaml_encoding_t = iota
+
+	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+	// Let the parser choose the break type.
+	yaml_ANY_BREAK yaml_break_t = iota
+
+	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
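+//
+// For example, the block style renders a sequence as
+//
+//	- a
+//	- b
+//
+// while the flow style renders it as [a, b].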
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT   // A STREAM-START event.
+	yaml_STREAM_END_EVENT     // A STREAM-END event.
+	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
+	yaml_ALIAS_EVENT          // An ALIAS event.
+	yaml_SCALAR_EVENT         // A SCALAR event.
+	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
+	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
+	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
+)
+
+var eventStrings = []string{
+	yaml_NO_EVENT:             "none",
+	yaml_STREAM_START_EVENT:   "stream start",
+	yaml_STREAM_END_EVENT:     "stream end",
+	yaml_DOCUMENT_START_EVENT: "document start",
+	yaml_DOCUMENT_END_EVENT:   "document end",
+	yaml_ALIAS_EVENT:          "alias",
+	yaml_SCALAR_EVENT:         "scalar",
+	yaml_SEQUENCE_START_EVENT: "sequence start",
+	yaml_SEQUENCE_END_EVENT:   "sequence end",
+	yaml_MAPPING_START_EVENT:  "mapping start",
+	yaml_MAPPING_END_EVENT:    "mapping end",
+}
+
+func (e yaml_event_type_t) String() string {
+	if e < 0 || int(e) >= len(eventStrings) {
+		return fmt.Sprintf("unknown event %d", e)
+	}
+	return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+	// The event type.
+	typ yaml_event_type_t
+
+	// The start and end of the event.
+	start_mark, end_mark yaml_mark_t
+
+	// The document encoding (for yaml_STREAM_START_EVENT).
+	encoding yaml_encoding_t
+
+	// The version directive (for yaml_DOCUMENT_START_EVENT).
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+	tag_directives []yaml_tag_directive_t
+
+	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+	anchor []byte
+
+	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	tag []byte
+
+	// The scalar value (for yaml_SCALAR_EVENT).
+	value []byte
+
+	// Is the document start/end indicator implicit, or the tag optional?
+	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+	implicit bool
+
+	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+	quoted_implicit bool
+
+	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+	yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+	yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
+	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
+	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
+	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+	// Not in original libyaml.
+	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"
+
+	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
+	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+	// An empty node.
+	yaml_NO_NODE yaml_node_type_t = iota
+
+	yaml_SCALAR_NODE   // A scalar node.
+	yaml_SEQUENCE_NODE // A sequence node.
+	yaml_MAPPING_NODE  // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+	key   int // The key of the element.
+	value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+	typ yaml_node_type_t // The node type.
+	tag []byte           // The node tag.
+
+	// The node data.
+
+	// The scalar parameters (for yaml_SCALAR_NODE).
+	scalar struct {
+		value  []byte              // The scalar value.
+		length int                 // The length of the scalar value.
+		style  yaml_scalar_style_t // The scalar style.
+	}
+
+	// The sequence parameters (for YAML_SEQUENCE_NODE).
+	sequence struct {
+		items_data []yaml_node_item_t    // The stack of sequence items.
+		style      yaml_sequence_style_t // The sequence style.
+	}
+
+	// The mapping parameters (for yaml_MAPPING_NODE).
+	mapping struct {
+		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
+		pairs_start *yaml_node_pair_t    // The beginning of the stack.
+		pairs_end   *yaml_node_pair_t    // The end of the stack.
+		pairs_top   *yaml_node_pair_t    // The top of the stack.
+		style       yaml_mapping_style_t // The mapping style.
+	}
+
+	start_mark yaml_mark_t // The beginning of the node.
+	end_mark   yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+	// The document nodes.
+	nodes []yaml_node_t
+
+	// The version directive.
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives.
+	tag_directives_data  []yaml_tag_directive_t
+	tag_directives_start int // The beginning of the tag directives list.
+	tag_directives_end   int // The end of the tag directives list.
+
+	start_implicit int // Is the document start indicator implicit?
+	end_implicit   int // Is the document end indicator implicit?
+
+	// The start/end of the document.
+	start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write not more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out]   data        A pointer to an application data specified by
+//                        yaml_parser_set_input().
+// [out]      buffer      The buffer to write the data from the source.
+// [in]       size        The size of the buffer.
+// [out]      size_read   The actual number of bytes read from the source.
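+//
+// In this Go port the handler simply fills the supplied buffer and returns
+// the number of bytes read, in the manner of io.Reader (see the function
+// type below).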
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible     bool        // Is a simple key possible?
+	required     bool        // Is a simple key required?
+	token_number int         // The number of the token.
+	mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. 
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeuing?
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write size bytes of the
+// buffer to the output.
+//
+// [in,out] data A pointer to application data specified by
+// yaml_emitter_set_output().
+// [in] buffer The buffer with bytes to be written.
+// [in] size The size of the buffer.
+//
+// On success, the handler should return nil; on failure, it should return
+// a non-nil error describing why the write failed.
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
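+// For illustration, a write handler matching yaml_write_handler_t above can
+// simply forward the buffer to an io.Writer (a sketch only; the name is
+// hypothetical, and it assumes output is directed at output_writer):
+//
+//	func writerWriteHandler(emitter *yaml_emitter_t, buffer []byte) error {
+//		_, err := emitter.output_writer.Write(buffer)
+//		return err
+//	}
+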
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+ raw_buffer_pos int // The current position of the buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+ canonical bool // Is the output in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+ whitespace bool // Was the last character whitespace?
+ indention bool // Was the last character an indentation character (' ', '-', '?', ':')?
+ open_ended bool // Is an explicit document end required?
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+ flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Dumper stuff
+
+ opened bool // Was the stream already opened?
+ closed bool // Was the stream already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+ serialized bool // Has the node been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 00000000..8110ce3c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the specified position can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+ (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
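+// For example (illustrative): with b = []byte("a\nb"), is_breakz(b, 1) is true
+// at the '\n'; it is likewise true at a NUL byte, e.g. is_breakz([]byte{0}, 0).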
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/modules.txt b/vendor/modules.txt index f052073d..196a7ac3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,8 +1,8 @@ # github.com/Cray-HPE/hms-sls/v2 v2.1.0 ## explicit; go 1.16 github.com/Cray-HPE/hms-sls/v2/pkg/sls-common -# github.com/Cray-HPE/hms-xname v1.1.0 -## explicit; go 1.17 +# github.com/Cray-HPE/hms-xname v1.1.1-0.20230602152417-25bcdeda83c9 +## explicit; go 1.20 github.com/Cray-HPE/hms-xname/xnames github.com/Cray-HPE/hms-xname/xnametypes # github.com/antihax/optional v1.0.0 @@ -14,6 +14,9 @@ github.com/chzyer/readline # github.com/cpuguy83/go-md2man/v2 v2.0.2 ## explicit; go 1.11 github.com/cpuguy83/go-md2man/v2/md2man +# github.com/davecgh/go-spew v1.1.1 +## explicit +github.com/davecgh/go-spew/spew # github.com/fatih/color v1.13.0 ## explicit; go 1.13 github.com/fatih/color @@ -52,6 +55,9 @@ github.com/mattn/go-isatty # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure +# github.com/pmezard/go-difflib v1.0.0 +## explicit +github.com/pmezard/go-difflib/difflib # github.com/rogpeppe/go-internal v1.10.0 ## explicit; go 1.19 # github.com/rs/zerolog v1.29.1 @@ -73,6 +79,11 @@ github.com/spf13/cobra/doc # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag +# github.com/stretchr/testify v1.5.1 +## explicit; go 1.13 +github.com/stretchr/testify/assert +github.com/stretchr/testify/require +github.com/stretchr/testify/suite # go4.org/intern v0.0.0-20220617035311-6925f38cc365 ## explicit; go 1.13 go4.org/intern @@ -129,6 +140,9 @@ google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb # gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c ## explicit; go 1.11 +# gopkg.in/yaml.v2 v2.2.7 +## explicit +gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3