From 3a16b27bb054ea7f1ea3405a2d010e73b67f1e29 Mon Sep 17 00:00:00 2001 From: Lucian Petrut Date: Wed, 10 Jul 2024 13:02:25 +0000 Subject: [PATCH 01/19] Add Harbor *photon Rock images The Harbor project has some Photon-OS based images: * docker.io/goharbor/nginx-photon:v2.10.2 * docker.io/goharbor/registry-photon:v2.10.2 * docker.io/goharbor/trivy-adapter-photon:v2.10.2 * docker.io/goharbor/redis-photon:v2.10.2 * docker.io/goharbor/harbor-portal:v2.10.2 We're defining Rock drop-in replacements, which obviously will not be Photon based. Note that the reference images have an install_cert.sh script that is a no-op on non-Photon images. We're keeping it for now, mostly for consistency. --- v2.10.2/nginx-photon/rockcraft.yaml | 53 ++ v2.10.2/portal/rockcraft.yaml | 91 ++ v2.10.2/redis-photon/rockcraft.yaml | 64 ++ v2.10.2/registry-photon/redis.patch | 883 ++++++++++++++++++++ v2.10.2/registry-photon/rockcraft.yaml | 84 ++ v2.10.2/trivy-adapter-photon/rockcraft.yaml | 94 +++ 6 files changed, 1269 insertions(+) create mode 100644 v2.10.2/nginx-photon/rockcraft.yaml create mode 100644 v2.10.2/portal/rockcraft.yaml create mode 100644 v2.10.2/redis-photon/rockcraft.yaml create mode 100644 v2.10.2/registry-photon/redis.patch create mode 100644 v2.10.2/registry-photon/rockcraft.yaml create mode 100644 v2.10.2/trivy-adapter-photon/rockcraft.yaml diff --git a/v2.10.2/nginx-photon/rockcraft.yaml b/v2.10.2/nginx-photon/rockcraft.yaml new file mode 100644 index 0000000..3e64a61 --- /dev/null +++ b/v2.10.2/nginx-photon/rockcraft.yaml @@ -0,0 +1,53 @@ +name: nginx +summary: Rock replacement for the Harbor Nginx image. +description: > + This rock is a drop in replacement for the + docker.io/goharbor/nginx-photon:v2.10.2 image. 
+# Based on the following: +# https://github.com/goharbor/harbor/tree/v2.10.2/make/photon/nginx +version: v2.10.2 +license: Apache-2.0 + +base: ubuntu@22.04 +build-base: ubuntu@22.04 +platforms: + amd64: + arm64: + +services: + nginx: + command: nginx -g daemon off + override: replace + startup: enabled + user: nginx + group: nginx + +parts: + nginx-user: + plugin: nil + overlay-script: | + groupadd -R $CRAFT_OVERLAY -g 10000 nginx + useradd -R $CRAFT_OVERLAY -u 10000 -g 10000 \ + -d /home/nginx -s /bin/bash -m nginx + + nginx: + after: [nginx-user] + plugin: nil + stage-packages: [nginx] + override-build: | + chown -R 10000:10000 $CRAFT_PART_INSTALL/etc/nginx + + mkdir -p $CRAFT_PART_INSTALL/var/log/nginx + chown -R 10000:10000 $CRAFT_PART_INSTALL/var/log/nginx + + # The reference image creates these symlinks, however a volume is expected + # to be mounted at the given location. + # + # Also, the github image builder job fails when having these links: + # https://paste.ubuntu.com/p/zsDHyR2NY4/plain/ + # + # ln -sf /dev/stdout $CRAFT_PART_INSTALL/var/log/nginx/access.log + # ln -sf /dev/stderr $CRAFT_PART_INSTALL/var/log/nginx/error.log + + # TODO: the upstream image defines a healthcheck, stop signal and a volume, + # should/can we do the same? diff --git a/v2.10.2/portal/rockcraft.yaml b/v2.10.2/portal/rockcraft.yaml new file mode 100644 index 0000000..31bbefb --- /dev/null +++ b/v2.10.2/portal/rockcraft.yaml @@ -0,0 +1,91 @@ +name: portal +summary: Rock replacement for the Harbor Portal image. +description: > + This rock is a drop in replacement for the + docker.io/goharbor/harbor-portal:v2.10.2 image. 
+# Based on the following: +# https://github.com/goharbor/harbor/tree/v2.10.2/make/photon/portal +version: v2.10.2 +license: Apache-2.0 + +base: ubuntu@22.04 +build-base: ubuntu@22.04 +platforms: + amd64: + arm64: + +services: + nginx: + command: nginx -g daemon off + override: replace + startup: enabled + user: nginx + group: nginx + +parts: + nginx-user: + plugin: nil + overlay-script: | + groupadd -R $CRAFT_OVERLAY -g 10000 nginx + useradd -R $CRAFT_OVERLAY -u 10000 -g 10000 \ + -d /home/nginx -s /bin/bash -m nginx + + nginx: + after: [nginx-user] + plugin: nil + stage-packages: [nginx] + override-build: | + chown -R 10000:10000 $CRAFT_PART_INSTALL/etc/nginx + + mkdir -p $CRAFT_PART_INSTALL/var/log/nginx + chown -R 10000:10000 $CRAFT_PART_INSTALL/var/log/nginx + + # The reference image creates these symlinks, however a volume is expected + # to be mounted at the given location. + # + # Also, the github image builder job fails when having these links: + # https://paste.ubuntu.com/p/zsDHyR2NY4/plain/ + # + # ln -sf /dev/stdout $CRAFT_PART_INSTALL/var/log/nginx/access.log + # ln -sf /dev/stderr $CRAFT_PART_INSTALL/var/log/nginx/error.log + + # TODO: the upstream image defines a healthcheck, stop signal and a volume, + # should/can we do the same? + + portal: + after: [nginx] + plugin: nil + source-type: git + source: https://github.com/goharbor/harbor + source-tag: v2.10.2 + source-depth: 1 + build-snaps: + - node/18/stable + override-build: | + cd $CRAFT_PART_BUILD/src/portal + cp $CRAFT_PART_SRC/api/v2.0/swagger.yaml . 
+ + export NPM_CONFIG_REGISTRY=https://registry.npmjs.org + + export PATH="$PATH:$CRAFT_PART_BUILD/src/portal/node_modules/.bin" + npm install ng-swagger-gen + + npm install --unsafe-perm + npm run generate-build-timestamp + node --max_old_space_size=2048 \ + 'node_modules/@angular/cli/bin/ng' \ + build --configuration production + npm install js-yaml@4.1.0 + node -e "const yaml = require('js-yaml'); const fs = require('fs'); const swagger = yaml.load(fs.readFileSync('swagger.yaml', 'utf8')); fs.writeFileSync('swagger.json', JSON.stringify(swagger));" + cp $CRAFT_PART_SRC/LICENSE dist/ + + cd app-swagger-ui + npm install --unsafe-perm + npm run build + + mkdir -p $CRAFT_PART_INSTALL/usr/share/nginx/html + cp -r $CRAFT_PART_BUILD/src/portal/dist/* $CRAFT_PART_INSTALL/usr/share/nginx/html/ + cp $CRAFT_PART_BUILD/src/portal/swagger.json $CRAFT_PART_INSTALL/usr/share/nginx/html/ + cp -r $CRAFT_PART_BUILD/src/portal/app-swagger-ui/dist/* $CRAFT_PART_INSTALL/usr/share/nginx/html/ + + chown -R 10000:10000 $CRAFT_PART_INSTALL/usr/share/nginx/html diff --git a/v2.10.2/redis-photon/rockcraft.yaml b/v2.10.2/redis-photon/rockcraft.yaml new file mode 100644 index 0000000..56fa402 --- /dev/null +++ b/v2.10.2/redis-photon/rockcraft.yaml @@ -0,0 +1,64 @@ +name: redis +summary: Rock replacement for the Harbor Redis image. +description: > + This rock is a drop in replacement for the + docker.io/goharbor/redis-photon:v2.10.2 image. 
+# Based on the following: +# https://github.com/goharbor/harbor/tree/v2.10.2/make/photon/redis +version: v2.10.2 +license: Apache-2.0 + +base: ubuntu@22.04 +build-base: ubuntu@22.04 +platforms: + amd64: + arm64: + +package-repositories: + - type: apt + components: [main] + suites: [jammy] + key-id: 54318FA4052D1E61A6B6F7BB5F4349D6BF53AA0C + url: https://packages.redis.io/deb + priority: always + +services: + redis: + command: redis-server /etc/redis.conf + override: replace + startup: enabled + user: redis + group: redis + # working-dir: /var/lib/redis + +parts: + redis-user: + plugin: nil + overlay-script: | + groupadd -R $CRAFT_OVERLAY -g 999 redis + useradd -R $CRAFT_OVERLAY -u 999 -g 999 -c "Redis Database Server" \ + -d /var/lib/redis -s /sbin/nologin -m redis + + image-prep: + after: [redis-user] + plugin: nil + source-type: git + source: https://github.com/goharbor/harbor + source-tag: v2.10.2 + source-depth: 1 + override-build: | + mkdir -p $CRAFT_PART_INSTALL/usr/bin + mkdir -p $CRAFT_PART_INSTALL/etc + cd $CRAFT_PART_SRC + cp ./make/photon/redis/docker-healthcheck $CRAFT_PART_INSTALL/usr/bin + cp ./make/photon/redis/redis.conf $CRAFT_PART_INSTALL/etc/redis.conf + + chown 999:999 $CRAFT_PART_INSTALL/etc/redis.conf + chmod +x $CRAFT_PART_INSTALL/usr/bin/docker-healthcheck + + redis: + after: [image-prep] + plugin: nil + stage-packages: [redis] + # TODO: the upstream image defines a healthcheck and a volume, + # should/can we do the same? diff --git a/v2.10.2/registry-photon/redis.patch b/v2.10.2/registry-photon/redis.patch new file mode 100644 index 0000000..ab4649c --- /dev/null +++ b/v2.10.2/registry-photon/redis.patch @@ -0,0 +1,883 @@ +diff --git a/configuration/configuration.go b/configuration/configuration.go +index 7076df85d4..3e74330321 100644 +--- a/configuration/configuration.go ++++ b/configuration/configuration.go +@@ -168,6 +168,9 @@ type Configuration struct { + // Addr specifies the the redis instance available to the application. 
+ Addr string `yaml:"addr,omitempty"` + ++ // SentinelMasterSet specifies the the redis sentinel master set name. ++ SentinelMasterSet string `yaml:"sentinelMasterSet,omitempty"` ++ + // Password string to use when making a connection. + Password string `yaml:"password,omitempty"` + +diff --git a/registry/handlers/app.go b/registry/handlers/app.go +index bf56cea22a..4a7cee9a2e 100644 +--- a/registry/handlers/app.go ++++ b/registry/handlers/app.go +@@ -3,6 +3,7 @@ package handlers + import ( + "context" + "crypto/rand" ++ "errors" + "expvar" + "fmt" + "math" +@@ -16,6 +17,7 @@ import ( + "strings" + "time" + ++ "github.com/FZambia/sentinel" + "github.com/distribution/reference" + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" +@@ -499,6 +501,45 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { + return + } + ++ var getRedisAddr func() (string, error) ++ var testOnBorrow func(c redis.Conn, t time.Time) error ++ if configuration.Redis.SentinelMasterSet != "" { ++ sntnl := &sentinel.Sentinel{ ++ Addrs: strings.Split(configuration.Redis.Addr, ","), ++ MasterName: configuration.Redis.SentinelMasterSet, ++ Dial: func(addr string) (redis.Conn, error) { ++ c, err := redis.DialTimeout("tcp", addr, ++ configuration.Redis.DialTimeout, ++ configuration.Redis.ReadTimeout, ++ configuration.Redis.WriteTimeout) ++ if err != nil { ++ return nil, err ++ } ++ return c, nil ++ }, ++ } ++ getRedisAddr = func() (string, error) { ++ return sntnl.MasterAddr() ++ } ++ testOnBorrow = func(c redis.Conn, t time.Time) error { ++ if !sentinel.TestRole(c, "master") { ++ return errors.New("role check failed") ++ } ++ return nil ++ } ++ ++ } else { ++ getRedisAddr = func() (string, error) { ++ return configuration.Redis.Addr, nil ++ } ++ testOnBorrow = func(c redis.Conn, t time.Time) error { ++ // TODO(stevvooe): We can probably do something more interesting ++ // here with the health package. 
++ _, err := c.Do("PING") ++ return err ++ } ++ } ++ + pool := &redis.Pool{ + Dial: func() (redis.Conn, error) { + // TODO(stevvooe): Yet another use case for contextual timing. +@@ -514,8 +555,11 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { + } + } + +- conn, err := redis.DialTimeout("tcp", +- configuration.Redis.Addr, ++ redisAddr, err := getRedisAddr() ++ if err != nil { ++ return nil, err ++ } ++ conn, err := redis.DialTimeout("tcp", redisAddr, + configuration.Redis.DialTimeout, + configuration.Redis.ReadTimeout, + configuration.Redis.WriteTimeout) +@@ -547,16 +591,11 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { + done(nil) + return conn, nil + }, +- MaxIdle: configuration.Redis.Pool.MaxIdle, +- MaxActive: configuration.Redis.Pool.MaxActive, +- IdleTimeout: configuration.Redis.Pool.IdleTimeout, +- TestOnBorrow: func(c redis.Conn, t time.Time) error { +- // TODO(stevvooe): We can probably do something more interesting +- // here with the health package. +- _, err := c.Do("PING") +- return err +- }, +- Wait: false, // if a connection is not available, proceed without cache. ++ MaxIdle: configuration.Redis.Pool.MaxIdle, ++ MaxActive: configuration.Redis.Pool.MaxActive, ++ IdleTimeout: configuration.Redis.Pool.IdleTimeout, ++ TestOnBorrow: testOnBorrow, ++ Wait: false, // if a connection is not available, proceed without cache. + } + + app.redis = pool +diff --git a/registry/handlers/app_test.go b/registry/handlers/app_test.go +index 60a57e6c15..8a644d83d8 100644 +--- a/registry/handlers/app_test.go ++++ b/registry/handlers/app_test.go +@@ -140,7 +140,29 @@ func TestAppDispatcher(t *testing.T) { + // TestNewApp covers the creation of an application via NewApp with a + // configuration. 
+ func TestNewApp(t *testing.T) { +- ctx := context.Background() ++ ++ config := configuration.Configuration{ ++ Storage: configuration.Storage{ ++ "testdriver": nil, ++ "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ ++ "enabled": false, ++ }}, ++ }, ++ Auth: configuration.Auth{ ++ // For now, we simply test that new auth results in a viable ++ // application. ++ "silly": { ++ "realm": "realm-test", ++ "service": "service-test", ++ }, ++ }, ++ } ++ runAppWithConfig(t, config) ++} ++ ++// TestNewApp covers the creation of an application via NewApp with a ++// configuration(with redis). ++func TestNewAppWithRedis(t *testing.T) { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": nil, +@@ -157,7 +179,38 @@ func TestNewApp(t *testing.T) { + }, + }, + } ++ config.Redis.Addr = "127.0.0.1:6379" ++ config.Redis.DB = 0 ++ runAppWithConfig(t, config) ++} + ++// TestNewApp covers the creation of an application via NewApp with a ++// configuration(with redis sentinel cluster). ++func TestNewAppWithRedisSentinelCluster(t *testing.T) { ++ config := configuration.Configuration{ ++ Storage: configuration.Storage{ ++ "testdriver": nil, ++ "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ ++ "enabled": false, ++ }}, ++ }, ++ Auth: configuration.Auth{ ++ // For now, we simply test that new auth results in a viable ++ // application. ++ "silly": { ++ "realm": "realm-test", ++ "service": "service-test", ++ }, ++ }, ++ } ++ config.Redis.Addr = "192.168.0.11:26379,192.168.0.12:26379" ++ config.Redis.DB = 0 ++ config.Redis.SentinelMasterSet = "mymaster" ++ runAppWithConfig(t, config) ++} ++ ++func runAppWithConfig(t *testing.T, config configuration.Configuration) { ++ ctx := context.Background() + // Mostly, with this test, given a sane configuration, we are simply + // ensuring that NewApp doesn't panic. We might want to tweak this + // behavior. 
+diff --git a/vendor.conf b/vendor.conf +index 33fe616b76..a8d8f58bc6 100644 +--- a/vendor.conf ++++ b/vendor.conf +@@ -51,3 +51,4 @@ gopkg.in/yaml.v2 v2.2.1 + rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git + github.com/opencontainers/go-digest ea51bea511f75cfa3ef6098cc253c5c3609b037a # v1.0.0 + github.com/opencontainers/image-spec 67d2d5658fe0476ab9bf414cec164077ebff3920 # v1.0.2 ++github.com/FZambia/sentinel 5585739eb4b6478aa30161866ccf9ce0ef5847c7 https://github.com/jeremyxu2010/sentinel.git +diff --git a/vendor/github.com/FZambia/sentinel/LICENSE b/vendor/github.com/FZambia/sentinel/LICENSE +new file mode 100644 +index 0000000000..8dada3edaf +--- /dev/null ++++ b/vendor/github.com/FZambia/sentinel/LICENSE +@@ -0,0 +1,201 @@ ++ Apache License ++ Version 2.0, January 2004 ++ http://www.apache.org/licenses/ ++ ++ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION ++ ++ 1. Definitions. ++ ++ "License" shall mean the terms and conditions for use, reproduction, ++ and distribution as defined by Sections 1 through 9 of this document. ++ ++ "Licensor" shall mean the copyright owner or entity authorized by ++ the copyright owner that is granting the License. ++ ++ "Legal Entity" shall mean the union of the acting entity and all ++ other entities that control, are controlled by, or are under common ++ control with that entity. For the purposes of this definition, ++ "control" means (i) the power, direct or indirect, to cause the ++ direction or management of such entity, whether by contract or ++ otherwise, or (ii) ownership of fifty percent (50%) or more of the ++ outstanding shares, or (iii) beneficial ownership of such entity. ++ ++ "You" (or "Your") shall mean an individual or Legal Entity ++ exercising permissions granted by this License. 
++ ++ "Source" form shall mean the preferred form for making modifications, ++ including but not limited to software source code, documentation ++ source, and configuration files. ++ ++ "Object" form shall mean any form resulting from mechanical ++ transformation or translation of a Source form, including but ++ not limited to compiled object code, generated documentation, ++ and conversions to other media types. ++ ++ "Work" shall mean the work of authorship, whether in Source or ++ Object form, made available under the License, as indicated by a ++ copyright notice that is included in or attached to the work ++ (an example is provided in the Appendix below). ++ ++ "Derivative Works" shall mean any work, whether in Source or Object ++ form, that is based on (or derived from) the Work and for which the ++ editorial revisions, annotations, elaborations, or other modifications ++ represent, as a whole, an original work of authorship. For the purposes ++ of this License, Derivative Works shall not include works that remain ++ separable from, or merely link (or bind by name) to the interfaces of, ++ the Work and Derivative Works thereof. ++ ++ "Contribution" shall mean any work of authorship, including ++ the original version of the Work and any modifications or additions ++ to that Work or Derivative Works thereof, that is intentionally ++ submitted to Licensor for inclusion in the Work by the copyright owner ++ or by an individual or Legal Entity authorized to submit on behalf of ++ the copyright owner. 
For the purposes of this definition, "submitted" ++ means any form of electronic, verbal, or written communication sent ++ to the Licensor or its representatives, including but not limited to ++ communication on electronic mailing lists, source code control systems, ++ and issue tracking systems that are managed by, or on behalf of, the ++ Licensor for the purpose of discussing and improving the Work, but ++ excluding communication that is conspicuously marked or otherwise ++ designated in writing by the copyright owner as "Not a Contribution." ++ ++ "Contributor" shall mean Licensor and any individual or Legal Entity ++ on behalf of whom a Contribution has been received by Licensor and ++ subsequently incorporated within the Work. ++ ++ 2. Grant of Copyright License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ copyright license to reproduce, prepare Derivative Works of, ++ publicly display, publicly perform, sublicense, and distribute the ++ Work and such Derivative Works in Source or Object form. ++ ++ 3. Grant of Patent License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ (except as stated in this section) patent license to make, have made, ++ use, offer to sell, sell, import, and otherwise transfer the Work, ++ where such license applies only to those patent claims licensable ++ by such Contributor that are necessarily infringed by their ++ Contribution(s) alone or by combination of their Contribution(s) ++ with the Work to which such Contribution(s) was submitted. 
If You ++ institute patent litigation against any entity (including a ++ cross-claim or counterclaim in a lawsuit) alleging that the Work ++ or a Contribution incorporated within the Work constitutes direct ++ or contributory patent infringement, then any patent licenses ++ granted to You under this License for that Work shall terminate ++ as of the date such litigation is filed. ++ ++ 4. Redistribution. You may reproduce and distribute copies of the ++ Work or Derivative Works thereof in any medium, with or without ++ modifications, and in Source or Object form, provided that You ++ meet the following conditions: ++ ++ (a) You must give any other recipients of the Work or ++ Derivative Works a copy of this License; and ++ ++ (b) You must cause any modified files to carry prominent notices ++ stating that You changed the files; and ++ ++ (c) You must retain, in the Source form of any Derivative Works ++ that You distribute, all copyright, patent, trademark, and ++ attribution notices from the Source form of the Work, ++ excluding those notices that do not pertain to any part of ++ the Derivative Works; and ++ ++ (d) If the Work includes a "NOTICE" text file as part of its ++ distribution, then any Derivative Works that You distribute must ++ include a readable copy of the attribution notices contained ++ within such NOTICE file, excluding those notices that do not ++ pertain to any part of the Derivative Works, in at least one ++ of the following places: within a NOTICE text file distributed ++ as part of the Derivative Works; within the Source form or ++ documentation, if provided along with the Derivative Works; or, ++ within a display generated by the Derivative Works, if and ++ wherever such third-party notices normally appear. The contents ++ of the NOTICE file are for informational purposes only and ++ do not modify the License. 
You may add Your own attribution ++ notices within Derivative Works that You distribute, alongside ++ or as an addendum to the NOTICE text from the Work, provided ++ that such additional attribution notices cannot be construed ++ as modifying the License. ++ ++ You may add Your own copyright statement to Your modifications and ++ may provide additional or different license terms and conditions ++ for use, reproduction, or distribution of Your modifications, or ++ for any such Derivative Works as a whole, provided Your use, ++ reproduction, and distribution of the Work otherwise complies with ++ the conditions stated in this License. ++ ++ 5. Submission of Contributions. Unless You explicitly state otherwise, ++ any Contribution intentionally submitted for inclusion in the Work ++ by You to the Licensor shall be under the terms and conditions of ++ this License, without any additional terms or conditions. ++ Notwithstanding the above, nothing herein shall supersede or modify ++ the terms of any separate license agreement you may have executed ++ with Licensor regarding such Contributions. ++ ++ 6. Trademarks. This License does not grant permission to use the trade ++ names, trademarks, service marks, or product names of the Licensor, ++ except as required for reasonable and customary use in describing the ++ origin of the Work and reproducing the content of the NOTICE file. ++ ++ 7. Disclaimer of Warranty. Unless required by applicable law or ++ agreed to in writing, Licensor provides the Work (and each ++ Contributor provides its Contributions) on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or ++ implied, including, without limitation, any warranties or conditions ++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A ++ PARTICULAR PURPOSE. 
You are solely responsible for determining the ++ appropriateness of using or redistributing the Work and assume any ++ risks associated with Your exercise of permissions under this License. ++ ++ 8. Limitation of Liability. In no event and under no legal theory, ++ whether in tort (including negligence), contract, or otherwise, ++ unless required by applicable law (such as deliberate and grossly ++ negligent acts) or agreed to in writing, shall any Contributor be ++ liable to You for damages, including any direct, indirect, special, ++ incidental, or consequential damages of any character arising as a ++ result of this License or out of the use or inability to use the ++ Work (including but not limited to damages for loss of goodwill, ++ work stoppage, computer failure or malfunction, or any and all ++ other commercial damages or losses), even if such Contributor ++ has been advised of the possibility of such damages. ++ ++ 9. Accepting Warranty or Additional Liability. While redistributing ++ the Work or Derivative Works thereof, You may choose to offer, ++ and charge a fee for, acceptance of support, warranty, indemnity, ++ or other liability obligations and/or rights consistent with this ++ License. However, in accepting such obligations, You may act only ++ on Your own behalf and on Your sole responsibility, not on behalf ++ of any other Contributor, and only if You agree to indemnify, ++ defend, and hold each Contributor harmless for any liability ++ incurred by, or claims asserted against, such Contributor by reason ++ of your accepting any such warranty or additional liability. ++ ++ END OF TERMS AND CONDITIONS ++ ++ APPENDIX: How to apply the Apache License to your work. ++ ++ To apply the Apache License to your work, attach the following ++ boilerplate notice, with the fields enclosed by brackets "{}" ++ replaced with your own identifying information. (Don't include ++ the brackets!) 
The text should be enclosed in the appropriate ++ comment syntax for the file format. We also recommend that a ++ file or class name and description of purpose be included on the ++ same "printed page" as the copyright notice for easier ++ identification within third-party archives. ++ ++ Copyright {yyyy} {name of copyright owner} ++ ++ Licensed under the Apache License, Version 2.0 (the "License"); ++ you may not use this file except in compliance with the License. ++ You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++ Unless required by applicable law or agreed to in writing, software ++ distributed under the License is distributed on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ See the License for the specific language governing permissions and ++ limitations under the License. +diff --git a/vendor/github.com/FZambia/sentinel/README.md b/vendor/github.com/FZambia/sentinel/README.md +new file mode 100644 +index 0000000000..f544c54ef6 +--- /dev/null ++++ b/vendor/github.com/FZambia/sentinel/README.md +@@ -0,0 +1,39 @@ ++go-sentinel ++=========== ++ ++Redis Sentinel support for [redigo](https://github.com/gomodule/redigo) library. 
++ ++Documentation ++------------- ++ ++- [API Reference](http://godoc.org/github.com/FZambia/sentinel) ++ ++Alternative solution ++-------------------- ++ ++You can alternatively configure Haproxy between your application and Redis to proxy requests to Redis master instance if you only need HA: ++ ++``` ++listen redis ++ server redis-01 127.0.0.1:6380 check port 6380 check inter 2s weight 1 inter 2s downinter 5s rise 10 fall 2 ++ server redis-02 127.0.0.1:6381 check port 6381 check inter 2s weight 1 inter 2s downinter 5s rise 10 fall 2 backup ++ bind *:6379 ++ mode tcp ++ option tcpka ++ option tcplog ++ option tcp-check ++ tcp-check send PING\r\n ++ tcp-check expect string +PONG ++ tcp-check send info\ replication\r\n ++ tcp-check expect string role:master ++ tcp-check send QUIT\r\n ++ tcp-check expect string +OK ++ balance roundrobin ++``` ++ ++This way you don't need to use this library. ++ ++License ++------- ++ ++Library is available under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.html). +diff --git a/vendor/github.com/FZambia/sentinel/sentinel.go b/vendor/github.com/FZambia/sentinel/sentinel.go +new file mode 100644 +index 0000000000..79209e9f0d +--- /dev/null ++++ b/vendor/github.com/FZambia/sentinel/sentinel.go +@@ -0,0 +1,426 @@ ++package sentinel ++ ++import ( ++ "errors" ++ "fmt" ++ "net" ++ "strings" ++ "sync" ++ "time" ++ ++ "github.com/garyburd/redigo/redis" ++) ++ ++// Sentinel provides a way to add high availability (HA) to Redis Pool using ++// preconfigured addresses of Sentinel servers and name of master which Sentinels ++// monitor. It works with Redis >= 2.8.12 (mostly because of ROLE command that ++// was introduced in that version, it's possible though to support old versions ++// using INFO command). 
++// ++// Example of the simplest usage to contact master "mymaster": ++// ++// func newSentinelPool() *redis.Pool { ++// sntnl := &sentinel.Sentinel{ ++// Addrs: []string{":26379", ":26380", ":26381"}, ++// MasterName: "mymaster", ++// Dial: func(addr string) (redis.Conn, error) { ++// timeout := 500 * time.Millisecond ++// c, err := redis.DialTimeout("tcp", addr, timeout, timeout, timeout) ++// if err != nil { ++// return nil, err ++// } ++// return c, nil ++// }, ++// } ++// return &redis.Pool{ ++// MaxIdle: 3, ++// MaxActive: 64, ++// Wait: true, ++// IdleTimeout: 240 * time.Second, ++// Dial: func() (redis.Conn, error) { ++// masterAddr, err := sntnl.MasterAddr() ++// if err != nil { ++// return nil, err ++// } ++// c, err := redis.Dial("tcp", masterAddr) ++// if err != nil { ++// return nil, err ++// } ++// return c, nil ++// }, ++// TestOnBorrow: func(c redis.Conn, t time.Time) error { ++// if !sentinel.TestRole(c, "master") { ++// return errors.New("Role check failed") ++// } else { ++// return nil ++// } ++// }, ++// } ++// } ++type Sentinel struct { ++ // Addrs is a slice with known Sentinel addresses. ++ Addrs []string ++ ++ // MasterName is a name of Redis master Sentinel servers monitor. ++ MasterName string ++ ++ // Dial is a user supplied function to connect to Sentinel on given address. This ++ // address will be chosen from Addrs slice. ++ // Note that as per the redis-sentinel client guidelines, a timeout is mandatory ++ // while connecting to Sentinels, and should not be set to 0. ++ Dial func(addr string) (redis.Conn, error) ++ ++ // Pool is a user supplied function returning custom connection pool to Sentinel. ++ // This can be useful to tune options if you are not satisfied with what default ++ // Sentinel pool offers. See defaultPool() method for default pool implementation. ++ // In most cases you only need to provide Dial function and let this be nil. 
++ Pool func(addr string) *redis.Pool ++ ++ mu sync.RWMutex ++ pools map[string]*redis.Pool ++ addr string ++} ++ ++// NoSentinelsAvailable is returned when all sentinels in the list are exhausted ++// (or none configured), and contains the last error returned by Dial (which ++// may be nil) ++type NoSentinelsAvailable struct { ++ lastError error ++} ++ ++func (ns NoSentinelsAvailable) Error() string { ++ if ns.lastError != nil { ++ return fmt.Sprintf("redigo: no sentinels available; last error: %s", ns.lastError.Error()) ++ } ++ return fmt.Sprintf("redigo: no sentinels available") ++} ++ ++// putToTop puts Sentinel address to the top of address list - this means ++// that all next requests will use Sentinel on this address first. ++// ++// From Sentinel guidelines: ++// ++// The first Sentinel replying to the client request should be put at the ++// start of the list, so that at the next reconnection, we'll try first ++// the Sentinel that was reachable in the previous connection attempt, ++// minimizing latency. ++// ++// Lock must be held by caller. ++func (s *Sentinel) putToTop(addr string) { ++ addrs := s.Addrs ++ if addrs[0] == addr { ++ // Already on top. ++ return ++ } ++ newAddrs := []string{addr} ++ for _, a := range addrs { ++ if a == addr { ++ continue ++ } ++ newAddrs = append(newAddrs, a) ++ } ++ s.Addrs = newAddrs ++} ++ ++// putToBottom puts Sentinel address to the bottom of address list. ++// We call this method internally when see that some Sentinel failed to answer ++// on application request so next time we start with another one. ++// ++// Lock must be held by caller. ++func (s *Sentinel) putToBottom(addr string) { ++ addrs := s.Addrs ++ if addrs[len(addrs)-1] == addr { ++ // Already on bottom. 
++ return ++ } ++ newAddrs := []string{} ++ for _, a := range addrs { ++ if a == addr { ++ continue ++ } ++ newAddrs = append(newAddrs, a) ++ } ++ newAddrs = append(newAddrs, addr) ++ s.Addrs = newAddrs ++} ++ ++// defaultPool returns a connection pool to one Sentinel. This allows ++// us to call concurrent requests to Sentinel using connection Do method. ++func (s *Sentinel) defaultPool(addr string) *redis.Pool { ++ return &redis.Pool{ ++ MaxIdle: 3, ++ MaxActive: 10, ++ Wait: true, ++ IdleTimeout: 240 * time.Second, ++ Dial: func() (redis.Conn, error) { ++ return s.Dial(addr) ++ }, ++ TestOnBorrow: func(c redis.Conn, t time.Time) error { ++ _, err := c.Do("PING") ++ return err ++ }, ++ } ++} ++ ++func (s *Sentinel) get(addr string) redis.Conn { ++ pool := s.poolForAddr(addr) ++ return pool.Get() ++} ++ ++func (s *Sentinel) poolForAddr(addr string) *redis.Pool { ++ s.mu.Lock() ++ if s.pools == nil { ++ s.pools = make(map[string]*redis.Pool) ++ } ++ pool, ok := s.pools[addr] ++ if ok { ++ s.mu.Unlock() ++ return pool ++ } ++ s.mu.Unlock() ++ newPool := s.newPool(addr) ++ s.mu.Lock() ++ p, ok := s.pools[addr] ++ if ok { ++ s.mu.Unlock() ++ return p ++ } ++ s.pools[addr] = newPool ++ s.mu.Unlock() ++ return newPool ++} ++ ++func (s *Sentinel) newPool(addr string) *redis.Pool { ++ if s.Pool != nil { ++ return s.Pool(addr) ++ } ++ return s.defaultPool(addr) ++} ++ ++// close connection pool to Sentinel. ++// Lock must be hold by caller. 
++func (s *Sentinel) close() { ++ if s.pools != nil { ++ for _, pool := range s.pools { ++ pool.Close() ++ } ++ } ++ s.pools = nil ++} ++ ++func (s *Sentinel) doUntilSuccess(f func(redis.Conn) (interface{}, error)) (interface{}, error) { ++ s.mu.RLock() ++ addrs := s.Addrs ++ s.mu.RUnlock() ++ ++ var lastErr error ++ ++ for _, addr := range addrs { ++ conn := s.get(addr) ++ reply, err := f(conn) ++ conn.Close() ++ if err != nil { ++ lastErr = err ++ s.mu.Lock() ++ pool, ok := s.pools[addr] ++ if ok { ++ pool.Close() ++ delete(s.pools, addr) ++ } ++ s.putToBottom(addr) ++ s.mu.Unlock() ++ continue ++ } ++ s.putToTop(addr) ++ return reply, nil ++ } ++ ++ return nil, NoSentinelsAvailable{lastError: lastErr} ++} ++ ++// MasterAddr returns an address of current Redis master instance. ++func (s *Sentinel) MasterAddr() (string, error) { ++ res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) { ++ return queryForMaster(c, s.MasterName) ++ }) ++ if err != nil { ++ return "", err ++ } ++ return res.(string), nil ++} ++ ++// SlaveAddrs returns a slice with known slave addresses of current master instance. ++func (s *Sentinel) SlaveAddrs() ([]string, error) { ++ res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) { ++ return queryForSlaveAddrs(c, s.MasterName) ++ }) ++ if err != nil { ++ return nil, err ++ } ++ return res.([]string), nil ++} ++ ++// Slave represents a Redis slave instance which is known by Sentinel. ++type Slave struct { ++ ip string ++ port string ++ flags string ++} ++ ++// Addr returns an address of slave. ++func (s *Slave) Addr() string { ++ return net.JoinHostPort(s.ip, s.port) ++} ++ ++// Available returns if slave is in working state at moment based on information in slave flags. ++func (s *Slave) Available() bool { ++ return !strings.Contains(s.flags, "disconnected") && !strings.Contains(s.flags, "s_down") ++} ++ ++// Slaves returns a slice with known slaves of master instance. 
++func (s *Sentinel) Slaves() ([]*Slave, error) { ++ res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) { ++ return queryForSlaves(c, s.MasterName) ++ }) ++ if err != nil { ++ return nil, err ++ } ++ return res.([]*Slave), nil ++} ++ ++// SentinelAddrs returns a slice of known Sentinel addresses Sentinel server aware of. ++func (s *Sentinel) SentinelAddrs() ([]string, error) { ++ res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) { ++ return queryForSentinels(c, s.MasterName) ++ }) ++ if err != nil { ++ return nil, err ++ } ++ return res.([]string), nil ++} ++ ++// Discover allows to update list of known Sentinel addresses. From docs: ++// ++// A client may update its internal list of Sentinel nodes following this procedure: ++// 1) Obtain a list of other Sentinels for this master using the command SENTINEL sentinels . ++// 2) Add every ip:port pair not already existing in our list at the end of the list. ++func (s *Sentinel) Discover() error { ++ addrs, err := s.SentinelAddrs() ++ if err != nil { ++ return err ++ } ++ s.mu.Lock() ++ for _, addr := range addrs { ++ if !stringInSlice(addr, s.Addrs) { ++ s.Addrs = append(s.Addrs, addr) ++ } ++ } ++ s.mu.Unlock() ++ return nil ++} ++ ++// Close closes current connection to Sentinel. ++func (s *Sentinel) Close() error { ++ s.mu.Lock() ++ s.close() ++ s.mu.Unlock() ++ return nil ++} ++ ++// TestRole wraps GetRole in a test to verify if the role matches an expected ++// role string. If there was any error in querying the supplied connection, ++// the function returns false. Works with Redis >= 2.8.12. ++// It's not goroutine safe, but if you call this method on pooled connections ++// then you are OK. ++func TestRole(c redis.Conn, expectedRole string) bool { ++ role, err := getRole(c) ++ if err != nil || role != expectedRole { ++ return false ++ } ++ return true ++} ++ ++// getRole is a convenience function supplied to query an instance (master or ++// slave) for its role. 
It attempts to use the ROLE command introduced in ++// redis 2.8.12. ++func getRole(c redis.Conn) (string, error) { ++ res, err := c.Do("ROLE") ++ if err != nil { ++ return "", err ++ } ++ rres, ok := res.([]interface{}) ++ if ok { ++ return redis.String(rres[0], nil) ++ } ++ return "", errors.New("redigo: can not transform ROLE reply to string") ++} ++ ++func queryForMaster(conn redis.Conn, masterName string) (string, error) { ++ res, err := redis.Strings(conn.Do("SENTINEL", "get-master-addr-by-name", masterName)) ++ if err != nil { ++ return "", err ++ } ++ if len(res) < 2 { ++ return "", errors.New("redigo: malformed get-master-addr-by-name reply") ++ } ++ masterAddr := net.JoinHostPort(res[0], res[1]) ++ return masterAddr, nil ++} ++ ++func queryForSlaveAddrs(conn redis.Conn, masterName string) ([]string, error) { ++ slaves, err := queryForSlaves(conn, masterName) ++ if err != nil { ++ return nil, err ++ } ++ slaveAddrs := make([]string, 0) ++ for _, slave := range slaves { ++ slaveAddrs = append(slaveAddrs, slave.Addr()) ++ } ++ return slaveAddrs, nil ++} ++ ++func queryForSlaves(conn redis.Conn, masterName string) ([]*Slave, error) { ++ res, err := redis.Values(conn.Do("SENTINEL", "slaves", masterName)) ++ if err != nil { ++ return nil, err ++ } ++ slaves := make([]*Slave, 0) ++ for _, a := range res { ++ sm, err := redis.StringMap(a, err) ++ if err != nil { ++ return slaves, err ++ } ++ slave := &Slave{ ++ ip: sm["ip"], ++ port: sm["port"], ++ flags: sm["flags"], ++ } ++ slaves = append(slaves, slave) ++ } ++ return slaves, nil ++} ++ ++func queryForSentinels(conn redis.Conn, masterName string) ([]string, error) { ++ res, err := redis.Values(conn.Do("SENTINEL", "sentinels", masterName)) ++ if err != nil { ++ return nil, err ++ } ++ sentinels := make([]string, 0) ++ for _, a := range res { ++ sm, err := redis.StringMap(a, err) ++ if err != nil { ++ return sentinels, err ++ } ++ sentinels = append(sentinels, fmt.Sprintf("%s:%s", sm["ip"], sm["port"])) ++ } ++ 
return sentinels, nil ++} ++ ++func stringInSlice(str string, slice []string) bool { ++ for _, s := range slice { ++ if s == str { ++ return true ++ } ++ } ++ return false ++} diff --git a/v2.10.2/registry-photon/rockcraft.yaml b/v2.10.2/registry-photon/rockcraft.yaml new file mode 100644 index 0000000..a01c3c7 --- /dev/null +++ b/v2.10.2/registry-photon/rockcraft.yaml @@ -0,0 +1,84 @@ +name: registry +summary: Rock replacement for the Harbor registry image. +description: > + This rock is a drop in replacement for the + docker.io/goharbor/registry-photon:v2.10.2 image. +# Based on the following: +# https://github.com/goharbor/harbor/tree/v2.10.2/make/photon/registry +version: v2.10.2 +license: Apache-2.0 + +base: ubuntu@22.04 +build-base: ubuntu@22.04 +platforms: + amd64: + arm64: + +services: + registry: + command: /home/harbor/entrypoint.sh + override: replace + startup: enabled + user: harbor + group: harbor + working-dir: /home/harbor + +parts: + create-user: + plugin: nil + overlay-script: | + groupadd -R $CRAFT_OVERLAY -r -g 10000 harbor + useradd -R $CRAFT_OVERLAY \ + --no-log-init -r -m -g 10000 -u 10000 harbor + + image-prep: + after: [create-user] + plugin: nil + source-type: git + source: https://github.com/goharbor/harbor + source-tag: v2.10.2 + source-depth: 1 + override-build: | + OUTDIR="$CRAFT_PART_INSTALL/home/harbor" + mkdir -p "$OUTDIR" + cd $CRAFT_PART_SRC + cp ./make/photon/common/install_cert.sh "$OUTDIR/" + cp ./make/photon/registry/entrypoint.sh "$OUTDIR/" + mkdir -p "$CRAFT_PART_INSTALL/etc/pki/tls/certs" + chown -R 10000:10000 "$CRAFT_PART_INSTALL/etc/pki/tls/certs" + chown -R 10000:10000 "$OUTDIR" + chmod u+x "$OUTDIR/entrypoint.sh" + chmod u+x "$OUTDIR/install_cert.sh" + + registry: + after: [image-prep] + build-snaps: + - go/1.21/stable + plugin: go + source-type: git + source: https://github.com/distribution/distribution.git + source-tag: v2.8.3 + source-depth: 1 + override-build: | + git apply --ignore-whitespace 
$CRAFT_PROJECT_DIR/redis.patch + + # 2.8.3 doesn't have a go.mod definition. + mkdir -p /go/src/github.com/docker + + ln -sf $(pwd) /go/src/github.com/docker/distribution + + pushd /go/src/github.com/docker/distribution + + export GOPATH=/go + export BUILDTAGS=include_oss include_gcs + export GO111MODULE=auto + export CGO_ENABLED=0 + make clean binaries + + mkdir -p $CRAFT_PART_INSTALL/usr/bin + + cp bin/registry $CRAFT_PART_INSTALL/usr/bin/registry_DO_NOT_USE_GC + chown 10000:10000 $CRAFT_PART_INSTALL/usr/bin/registry_DO_NOT_USE_GC + + # TODO: the upstream image defines a healthcheck and a volume, + # should/can we do the same? diff --git a/v2.10.2/trivy-adapter-photon/rockcraft.yaml b/v2.10.2/trivy-adapter-photon/rockcraft.yaml new file mode 100644 index 0000000..d7db7a2 --- /dev/null +++ b/v2.10.2/trivy-adapter-photon/rockcraft.yaml @@ -0,0 +1,94 @@ +name: trivy-adapter +summary: Rock replacement for the Harbor Trivy adapter image. +description: > + This rock is a drop in replacement for the + docker.io/goharbor/trivy-adapter-photon:v2.10.2 image. 
+# Based on the following: +# https://github.com/goharbor/harbor/tree/v2.10.2/make/photon/trivy-adapter +version: v2.10.2 +license: Apache-2.0 + +base: ubuntu@22.04 +build-base: ubuntu@22.04 +platforms: + amd64: + arm64: + +services: + scanner: + command: /home/scanner/entrypoint.sh + override: replace + startup: enabled + user: scanner + group: scanner + +parts: + create-user: + plugin: nil + overlay-script: | + groupadd -R $CRAFT_OVERLAY -r -g 10000 scanner + useradd -R $CRAFT_OVERLAY \ + --no-log-init -r -m -g 10000 -u 10000 scanner + + image-prep: + after: [create-user] + plugin: nil + source-type: git + source: https://github.com/goharbor/harbor + source-tag: v2.10.2 + source-depth: 1 + override-build: | + OUTDIR="$CRAFT_PART_INSTALL/home/scanner" + mkdir -p "$OUTDIR" + cd $CRAFT_PART_SRC + cp ./make/photon/common/install_cert.sh "$OUTDIR/" + cp ./make/photon/trivy-adapter/entrypoint.sh "$OUTDIR/" + mkdir -p "$CRAFT_PART_INSTALL/etc/pki/tls/certs" + chown -R 10000:10000 "$CRAFT_PART_INSTALL/etc/pki/tls/certs" + chown -R 10000:10000 "$OUTDIR" + chmod u+x "$OUTDIR/entrypoint.sh" + chmod u+x "$OUTDIR/install_cert.sh" + + trivy: + after: [image-prep] + build-snaps: + - go/1.21/stable + plugin: go + source-type: git + source: https://github.com/aquasecurity/trivy + source-tag: v0.50.1 + source-depth: 1 + override-build: | + export GOOS=linux + export GO111MODULE=on + export CGO_ENABLED=0 + go build -o trivy cmd/trivy/main.go + + mkdir -p $CRAFT_PART_INSTALL/usr/local/bin + cp trivy $CRAFT_PART_INSTALL/usr/local/bin/trivy + chown 10000:10000 $CRAFT_PART_INSTALL/usr/local/bin/trivy + + # TODO: the upstream image defines a healthcheck, + # should/can we do the same? 
+ + trivy-adapter: + after: [image-prep] + build-snaps: + - go/1.21/stable + plugin: go + source-type: git + source: https://github.com/aquasecurity/harbor-scanner-trivy + source-tag: v0.30.23 + source-depth: 1 + override-build: | + export GOOS=linux + export GO111MODULE=on + export CGO_ENABLED=0 + go build -o scanner-trivy cmd/scanner-trivy/main.go + + mkdir -p $CRAFT_PART_INSTALL/home/scanner/bin + cp scanner-trivy $CRAFT_PART_INSTALL/home/scanner/bin/scanner-trivy + chown 10000:10000 $CRAFT_PART_INSTALL/home/scanner/bin/scanner-trivy + + # TODO: the upstream image defines a healthcheck, + # should/can we do the same? From f74434ef762cac9937e57ec022995420f96644b6 Mon Sep 17 00:00:00 2001 From: Lucian Petrut Date: Tue, 16 Jul 2024 07:32:17 +0000 Subject: [PATCH 02/19] Fix portal image Rockcraft does not allow two parts to stage the same files with different content. In our case, the nginx package includes a sample at /usr/share/nginx/html that has to be removed. --- v2.10.2/portal/rockcraft.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/v2.10.2/portal/rockcraft.yaml b/v2.10.2/portal/rockcraft.yaml index 31bbefb..87cc99b 100644 --- a/v2.10.2/portal/rockcraft.yaml +++ b/v2.10.2/portal/rockcraft.yaml @@ -49,6 +49,10 @@ parts: # ln -sf /dev/stdout $CRAFT_PART_INSTALL/var/log/nginx/access.log # ln -sf /dev/stderr $CRAFT_PART_INSTALL/var/log/nginx/error.log + # Drop the sample html dir, we'll add the portal files instead and we + # have to avoid staging the same files with different content. + rm -rf $CRAFT_PART_INSTALL/usr/share/nginx/html/ + # TODO: the upstream image defines a healthcheck, stop signal and a volume, # should/can we do the same? From 50cb329496f2b6cb150ef4c0025bc6181abe4554 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Tue, 16 Jul 2024 17:14:30 +0300 Subject: [PATCH 03/19] Add tox files for testing. 
Signed-off-by: Nashwan Azhari --- tests/integration/conftest.py | 1 + tests/requirements-dev.txt | 5 +++ tests/requirements-test.txt | 7 +++ tests/tox.ini | 80 +++++++++++++++++++++++++++++++++++ 4 files changed, 93 insertions(+) create mode 100644 tests/integration/conftest.py create mode 100644 tests/requirements-dev.txt create mode 100644 tests/requirements-test.txt create mode 100644 tests/tox.ini diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 0000000..13128c5 --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1 @@ +pytest_plugins = ["k8s_test_harness.conftest"] diff --git a/tests/requirements-dev.txt b/tests/requirements-dev.txt new file mode 100644 index 0000000..a66721a --- /dev/null +++ b/tests/requirements-dev.txt @@ -0,0 +1,5 @@ +black==24.3.0 +codespell==2.2.4 +flake8==6.0.0 +isort==5.12.0 +licenseheaders==0.8.8 diff --git a/tests/requirements-test.txt b/tests/requirements-test.txt new file mode 100644 index 0000000..b6895b5 --- /dev/null +++ b/tests/requirements-test.txt @@ -0,0 +1,7 @@ +coverage[toml]==7.2.5 +pytest==7.3.1 +PyYAML==6.0.1 +tenacity==8.2.3 +charmed-kubeflow-chisme>=0.4 +# TODO(aznashwan): update with upstream Canonical repo when the time comes: +git+https://github.com/petrutlucian94/k8s-test-harness.git@initial diff --git a/tests/tox.ini b/tests/tox.ini new file mode 100644 index 0000000..22a9094 --- /dev/null +++ b/tests/tox.ini @@ -0,0 +1,80 @@ +[tox] +no_package = True +skip_missing_interpreters = True +env_list = format, lint, integration +min_version = 4.0.0 + +[testenv] +set_env = + PYTHONBREAKPOINT=pdb.set_trace + PY_COLORS=1 +pass_env = + PYTHONPATH + +[testenv:format] +description = Apply coding style standards to code +deps = -r {tox_root}/requirements-dev.txt +commands = + licenseheaders -t {tox_root}/.copyright.tmpl -cy -o 'Canonical, Ltd' -d {tox_root}/sanity + isort {tox_root}/sanity --profile=black + black {tox_root}/sanity + + licenseheaders -t 
{tox_root}/.copyright.tmpl -cy -o 'Canonical, Ltd' -d {tox_root}/integration + isort {tox_root}/integration --profile=black + black {tox_root}/integration + +[testenv:lint] +description = Check code against coding style standards +deps = -r {tox_root}/requirements-dev.txt +commands = + codespell {tox_root}/sanity + flake8 {tox_root}/sanity + licenseheaders -t {tox_root}/.copyright.tmpl -cy -o 'Canonical, Ltd' -d {tox_root}/sanity --dry + isort {tox_root}/sanity --profile=black --check + black {tox_root}/sanity --check --diff + + codespell {tox_root}/integration + flake8 {tox_root}/integration + licenseheaders -t {tox_root}/.copyright.tmpl -cy -o 'Canonical, Ltd' -d {tox_root}/integration --dry + isort {tox_root}/integration --profile=black --check + black {tox_root}/integration --check --diff + +[testenv:sanity] +description = Run integration tests +deps = + -r {tox_root}/requirements-test.txt +commands = + pytest -v \ + --maxfail 1 \ + --tb native \ + --log-cli-level DEBUG \ + --disable-warnings \ + {posargs} \ + {tox_root}/sanity +pass_env = + TEST_* + ROCK_* + +[testenv:integration] +description = Run integration tests +deps = + -r {tox_root}/requirements-test.txt +commands = + pytest -v \ + --maxfail 1 \ + --tb native \ + --log-cli-level DEBUG \ + --disable-warnings \ + {posargs} \ + {tox_root}/integration +pass_env = + TEST_* + ROCK_* + +[flake8] +max-line-length = 120 +select = E,W,F,C,N +# E231 rule is not aware of f-strings +ignore = W503,E231 +exclude = venv,.git,.tox,.tox_env,.venv,build,dist,*.egg_info +show-source = true From 4d9a1067b0b301b2aafd7f3cd3765bfc6d5f9e2f Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Wed, 10 Jul 2024 12:37:49 +0300 Subject: [PATCH 04/19] Add rockcraft specs for main Harbor components Adds rockcraft specs for following core Harbor component images: * docker.io/goharbor/harbor-core:v2.10.2 * docker.io/goharbor/harbor-jobservice:v2.10.2 * docker.io/goharbor/harbor-registryctl:v2.10.2 * docker.io/goharbor/harbor-db:v2.10.2 
* docker.io/goharbor/harbor-exporter:v2.10.2 Signed-off-by: Nashwan Azhari --- tests/sanity/test_harbor_core_v2_10_2.py | 87 ++ tests/sanity/test_harbor_db_v2_10_2.py | 70 ++ tests/sanity/test_harbor_exporter_v2_10_2.py | 87 ++ .../sanity/test_harbor_jobservice_v2_10_2.py | 87 ++ .../sanity/test_harbor_registryctl_v2_10_2.py | 106 +++ v2.10.2/README.txt | 14 + v2.10.2/harbor-core/README.md | 3 + v2.10.2/harbor-core/rockcraft.yaml | 189 ++++ v2.10.2/harbor-db/README.md | 3 + v2.10.2/harbor-db/rockcraft.yaml | 177 ++++ v2.10.2/harbor-exporter/README.md | 3 + v2.10.2/harbor-exporter/rockcraft.yaml | 129 +++ v2.10.2/harbor-jobservice/README.md | 3 + v2.10.2/harbor-jobservice/rockcraft.yaml | 127 +++ v2.10.2/harbor-registryctl/README.md | 3 + v2.10.2/harbor-registryctl/redis.patch | 883 ++++++++++++++++++ v2.10.2/harbor-registryctl/rockcraft.yaml | 161 ++++ 17 files changed, 2132 insertions(+) create mode 100644 tests/sanity/test_harbor_core_v2_10_2.py create mode 100644 tests/sanity/test_harbor_db_v2_10_2.py create mode 100644 tests/sanity/test_harbor_exporter_v2_10_2.py create mode 100644 tests/sanity/test_harbor_jobservice_v2_10_2.py create mode 100644 tests/sanity/test_harbor_registryctl_v2_10_2.py create mode 100644 v2.10.2/README.txt create mode 100644 v2.10.2/harbor-core/README.md create mode 100644 v2.10.2/harbor-core/rockcraft.yaml create mode 100644 v2.10.2/harbor-db/README.md create mode 100644 v2.10.2/harbor-db/rockcraft.yaml create mode 100644 v2.10.2/harbor-exporter/README.md create mode 100644 v2.10.2/harbor-exporter/rockcraft.yaml create mode 100644 v2.10.2/harbor-jobservice/README.md create mode 100644 v2.10.2/harbor-jobservice/rockcraft.yaml create mode 100644 v2.10.2/harbor-registryctl/README.md create mode 100644 v2.10.2/harbor-registryctl/redis.patch create mode 100644 v2.10.2/harbor-registryctl/rockcraft.yaml diff --git a/tests/sanity/test_harbor_core_v2_10_2.py b/tests/sanity/test_harbor_core_v2_10_2.py new file mode 100644 index 0000000..e82f14f 
--- /dev/null +++ b/tests/sanity/test_harbor_core_v2_10_2.py @@ -0,0 +1,87 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging +import random +import pytest +import string +import subprocess +import sys + +from charmed_kubeflow_chisme.rock import CheckRock + +logger: logging.Logger = logging.getLogger(__name__) + +logger.addHandler(logging.FileHandler(f"{__name__}.log")) +logger.addHandler(logging.StreamHandler(sys.stdout)) + + +ORIGINAL_IMAGE = "docker.io/goharbor/harbor-core" + +@pytest.fixture() +def rock_test_env(tmpdir): + """Yields a temporary directory and random docker container name, then cleans them up after.""" + container_name = "".join( + [str(i) for i in random.choices(string.ascii_lowercase, k=8)] + ) + yield tmpdir, container_name + + try: + subprocess.run(["docker", "rm", container_name]) + except Exception: + pass + # tmpdir fixture we use here should clean up the other files for us + + +def _list_files_in_image_dir( + image: str, container_name: str, root_dir: str="/") -> list[str]: + """Lists all regular file paths under the given dir in the given image.""" + cmd = [ + "docker", + "run", + "--rm", + "--name", + container_name, + image, + "find", + root_dir, + "-type", + "f" + ] + + proc = subprocess.run(cmd, capture_output=True) + return [l.decode('utf8').strip() for l in proc.stdout.splitlines()] + + +@pytest.mark.abort_on_fail +def test_rock(rock_test_env): + """Test rock.""" + _, container_name = rock_test_env + check_rock = CheckRock("rockcraft.yaml") + rock_image = check_rock.get_name() + rock_version = check_rock.get_version() + LOCAL_ROCK_IMAGE = f"{rock_image}:{rock_version}" + ORIGINAL_ROCK_IMAGE = f"{ORIGINAL_IMAGE}:{rock_version}" + + dir_to_check = "/harbor" + + original_image_files = _list_files_in_image_dir( + ORIGINAL_ROCK_IMAGE, f"{container_name}-original", + root_dir=dir_to_check) + local_rock_files = _list_files_in_image_dir( + LOCAL_ROCK_IMAGE, container_name, 
root_dir=dir_to_check) + + rock_fileset = set(local_rock_files) + original_fileset = set(original_image_files) + + original_extra_files = original_fileset - rock_fileset + if original_extra_files: + pytest.fail( + f"Missing some files from the original image: " + f"{original_extra_files}") + + rock_extra_files = rock_fileset - original_fileset + if rock_extra_files: + pytest.fail( + f"Rock has extra files not present in original image: " + f"{rock_extra_files}") diff --git a/tests/sanity/test_harbor_db_v2_10_2.py b/tests/sanity/test_harbor_db_v2_10_2.py new file mode 100644 index 0000000..29e29eb --- /dev/null +++ b/tests/sanity/test_harbor_db_v2_10_2.py @@ -0,0 +1,70 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging +import random +import pytest +import string +import subprocess +import sys + +from charmed_kubeflow_chisme.rock import CheckRock + +logger: logging.Logger = logging.getLogger(__name__) + +logger.addHandler(logging.FileHandler(f"{__name__}.log")) +logger.addHandler(logging.StreamHandler(sys.stdout)) + + +@pytest.fixture() +def rock_test_env(tmpdir): + """Yields a temporary directory and random docker container name, then cleans them up after.""" + container_name = "".join( + [str(i) for i in random.choices(string.ascii_lowercase, k=8)] + ) + yield tmpdir, container_name + + try: + subprocess.run(["docker", "rm", container_name]) + except Exception: + pass + # tmpdir fixture we use here should clean up the other files for us + + + def _check_file_present_in_image(image: str, path_to_check: str): + """Checks whether a file with the given path is present within an image.""" + subprocess.run( + [ + "docker", + "run", + image, + "exec", + "ls", + "-la", + path_to_check, + ], + check=True, + ) + + +@pytest.mark.abort_on_fail +def test_rock(rock_test_env): + """Test rock.""" + _, container_name = rock_test_env + check_rock = CheckRock("rockcraft.yaml") + rock_image = check_rock.get_name() + rock_version = 
check_rock.get_version() + LOCAL_ROCK_IMAGE = f"{rock_image}:{rock_version}" + + image_files_to_check = [ + "/var/lib/postgresql/data", + "/run/postgresq", + "/docker-entrypoint.sh", + "/initdb.sh", + "/upgrade.sh", + "/docker-healthcheck.sh", + "/docker-entrypoint-initdb.d/initial-registry.sql", + ] + + for file in image_files_to_check: + _check_file_present_in_image(LOCAL_ROCK_IMAGE, file) diff --git a/tests/sanity/test_harbor_exporter_v2_10_2.py b/tests/sanity/test_harbor_exporter_v2_10_2.py new file mode 100644 index 0000000..2d081a4 --- /dev/null +++ b/tests/sanity/test_harbor_exporter_v2_10_2.py @@ -0,0 +1,87 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging +import random +import pytest +import string +import subprocess +import sys + +from charmed_kubeflow_chisme.rock import CheckRock + +logger: logging.Logger = logging.getLogger(__name__) + +logger.addHandler(logging.FileHandler(f"{__name__}.log")) +logger.addHandler(logging.StreamHandler(sys.stdout)) + + +ORIGINAL_IMAGE = "docker.io/goharbor/harbor-exporter" + +@pytest.fixture() +def rock_test_env(tmpdir): + """Yields a temporary directory and random docker container name, then cleans them up after.""" + container_name = "".join( + [str(i) for i in random.choices(string.ascii_lowercase, k=8)] + ) + yield tmpdir, container_name + + try: + subprocess.run(["docker", "rm", container_name]) + except Exception: + pass + # tmpdir fixture we use here should clean up the other files for us + + +def _list_files_in_image_dir( + image: str, container_name: str, root_dir: str="/") -> list[str]: + """Lists all regular file paths under the given dir in the given image.""" + cmd = [ + "docker", + "run", + "--rm", + "--name", + container_name, + image, + "find", + root_dir, + "-type", + "f" + ] + + proc = subprocess.run(cmd, capture_output=True) + return [l.decode('utf8').strip() for l in proc.stdout.splitlines()] + + +@pytest.mark.abort_on_fail +def test_rock(rock_test_env): 
+ """Test rock.""" + _, container_name = rock_test_env + check_rock = CheckRock("rockcraft.yaml") + rock_image = check_rock.get_name() + rock_version = check_rock.get_version() + LOCAL_ROCK_IMAGE = f"{rock_image}:{rock_version}" + ORIGINAL_ROCK_IMAGE = f"{ORIGINAL_IMAGE}:{rock_version}" + + dir_to_check = "/harbor" + + original_image_files = _list_files_in_image_dir( + ORIGINAL_ROCK_IMAGE, f"{container_name}-original", + root_dir=dir_to_check) + local_rock_files = _list_files_in_image_dir( + LOCAL_ROCK_IMAGE, container_name, root_dir=dir_to_check) + + rock_fileset = set(local_rock_files) + original_fileset = set(original_image_files) + + original_extra_files = original_fileset - rock_fileset + if original_extra_files: + pytest.fail( + f"Missing some files from the original image: " + f"{original_extra_files}") + + rock_extra_files = rock_fileset - original_fileset + if rock_extra_files: + pytest.fail( + f"Rock has extra files not present in original image: " + f"{rock_extra_files}") diff --git a/tests/sanity/test_harbor_jobservice_v2_10_2.py b/tests/sanity/test_harbor_jobservice_v2_10_2.py new file mode 100644 index 0000000..d777d37 --- /dev/null +++ b/tests/sanity/test_harbor_jobservice_v2_10_2.py @@ -0,0 +1,87 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +import logging +import random +import pytest +import string +import subprocess +import sys + +from charmed_kubeflow_chisme.rock import CheckRock + +logger: logging.Logger = logging.getLogger(__name__) + +logger.addHandler(logging.FileHandler(f"{__name__}.log")) +logger.addHandler(logging.StreamHandler(sys.stdout)) + + +ORIGINAL_IMAGE = "docker.io/goharbor/harbor-jobservice" + +@pytest.fixture() +def rock_test_env(tmpdir): + """Yields a temporary directory and random docker container name, then cleans them up after.""" + container_name = "".join( + [str(i) for i in random.choices(string.ascii_lowercase, k=8)] + ) + yield tmpdir, container_name + + try: + subprocess.run(["docker", "rm", container_name]) + except Exception: + pass + # tmpdir fixture we use here should clean up the other files for us + + +def _list_files_in_image_dir( + image: str, container_name: str, root_dir: str="/") -> list[str]: + """Lists all regular file paths under the given dir in the given image.""" + cmd = [ + "docker", + "run", + "--rm", + "--name", + container_name, + image, + "find", + root_dir, + "-type", + "f" + ] + + proc = subprocess.run(cmd, capture_output=True) + return [l.decode('utf8').strip() for l in proc.stdout.splitlines()] + + +@pytest.mark.abort_on_fail +def test_rock(rock_test_env): + """Test rock.""" + _, container_name = rock_test_env + check_rock = CheckRock("rockcraft.yaml") + rock_image = check_rock.get_name() + rock_version = check_rock.get_version() + LOCAL_ROCK_IMAGE = f"{rock_image}:{rock_version}" + ORIGINAL_ROCK_IMAGE = f"{ORIGINAL_IMAGE}:{rock_version}" + + dir_to_check = "/harbor" + + original_image_files = _list_files_in_image_dir( + ORIGINAL_ROCK_IMAGE, f"{container_name}-original", + root_dir=dir_to_check) + local_rock_files = _list_files_in_image_dir( + LOCAL_ROCK_IMAGE, container_name, root_dir=dir_to_check) + + rock_fileset = set(local_rock_files) + original_fileset = set(original_image_files) + + original_extra_files = original_fileset - rock_fileset 
+ if original_extra_files: + pytest.fail( + f"Missing some files from the original image: " + f"{original_extra_files}") + + rock_extra_files = rock_fileset - original_fileset + if rock_extra_files: + pytest.fail( + f"Rock has extra files not present in original image: " + f"{rock_extra_files}") diff --git a/tests/sanity/test_harbor_registryctl_v2_10_2.py b/tests/sanity/test_harbor_registryctl_v2_10_2.py new file mode 100644 index 0000000..5df6ba2 --- /dev/null +++ b/tests/sanity/test_harbor_registryctl_v2_10_2.py @@ -0,0 +1,106 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging +import random +import pytest +import string +import subprocess +import sys + +from charmed_kubeflow_chisme.rock import CheckRock + +logger: logging.Logger = logging.getLogger(__name__) + +logger.addHandler(logging.FileHandler(f"{__name__}.log")) +logger.addHandler(logging.StreamHandler(sys.stdout)) + + +ORIGINAL_IMAGE = "docker.io/goharbor/harbor-registryctl" + +@pytest.fixture() +def rock_test_env(tmpdir): + """Yields a temporary directory and random docker container name, then cleans them up after.""" + container_name = "".join( + [str(i) for i in random.choices(string.ascii_lowercase, k=8)] + ) + yield tmpdir, container_name + + try: + subprocess.run(["docker", "rm", container_name]) + except Exception: + pass + # tmpdir fixture we use here should clean up the other files for us + + +def _list_files_in_image_dir( + image: str, container_name: str, root_dir: str="/") -> list[str]: + """Lists all regular file paths under the given dir in the given image.""" + cmd = [ + "docker", + "run", + "--rm", + "--name", + container_name, + image, + "find", + root_dir, + "-type", + "f" + ] + + proc = subprocess.run(cmd, capture_output=True) + return [l.decode('utf8').strip() for l in proc.stdout.splitlines()] + + +def _check_file_present_in_image(image: str, path_to_check: str): + """Checks whether a file with the given path is present within an image.""" 
+ subprocess.run( + [ + "docker", + "run", + image, + "exec", + "ls", + "-la", + path_to_check, + ], + check=True, + ) + +@pytest.mark.abort_on_fail +def test_rock(rock_test_env): + """Test rock.""" + _, container_name = rock_test_env + check_rock = CheckRock("rockcraft.yaml") + rock_image = check_rock.get_name() + rock_version = check_rock.get_version() + LOCAL_ROCK_IMAGE = f"{rock_image}:{rock_version}" + ORIGINAL_ROCK_IMAGE = f"{ORIGINAL_IMAGE}:{rock_version}" + + dir_to_check = "/harbor" + + original_image_files = _list_files_in_image_dir( + ORIGINAL_ROCK_IMAGE, f"{container_name}-original", + root_dir=dir_to_check) + local_rock_files = _list_files_in_image_dir( + LOCAL_ROCK_IMAGE, container_name, root_dir=dir_to_check) + + rock_fileset = set(local_rock_files) + original_fileset = set(original_image_files) + + original_extra_files = original_fileset - rock_fileset + if original_extra_files: + pytest.fail( + f"Missing some files from the original image: " + f"{original_extra_files}") + + rock_extra_files = rock_fileset - original_fileset + if rock_extra_files: + pytest.fail( + f"Rock has extra files not present in original image: " + f"{rock_extra_files}") + + # NOTE(aznashwan): the registryctl image also embeds a `registry` binary: + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/registryctl/Dockerfile#L6 + _check_file_present_in_image(LOCAL_ROCK_IMAGE, "/usr/bin/registry_DO_NOT_USE_GC") diff --git a/v2.10.2/README.txt b/v2.10.2/README.txt new file mode 100644 index 0000000..0aedabe --- /dev/null +++ b/v2.10.2/README.txt @@ -0,0 +1,14 @@ +# Canonical ROCKs for Harbor v2.10.2 + +Aim to be compatible with following upstream images: + +* docker.io/goharbor/nginx-photon:v2.10.2 +* docker.io/goharbor/harbor-portal:v2.10.2 +* docker.io/goharbor/harbor-core:v2.10.2 +* docker.io/goharbor/harbor-jobservice:v2.10.2 +* docker.io/goharbor/registry-photon:v2.10.2 +* docker.io/goharbor/harbor-registryctl:v2.10.2 +* 
docker.io/goharbor/trivy-adapter-photon:v2.10.2 +* docker.io/goharbor/harbor-db:v2.10.2 +* docker.io/goharbor/redis-photon:v2.10.2 +* docker.io/goharbor/harbor-exporter:v2.10.2 diff --git a/v2.10.2/harbor-core/README.md b/v2.10.2/harbor-core/README.md new file mode 100644 index 0000000..94846b0 --- /dev/null +++ b/v2.10.2/harbor-core/README.md @@ -0,0 +1,3 @@ +# ROCK specs for harbor-core. + +Aims to be compatible with `docker.io/goharbor/harbor-core`. diff --git a/v2.10.2/harbor-core/rockcraft.yaml b/v2.10.2/harbor-core/rockcraft.yaml new file mode 100644 index 0000000..e37d2c9 --- /dev/null +++ b/v2.10.2/harbor-core/rockcraft.yaml @@ -0,0 +1,189 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +# Rockcraft definition for Harbor core image: +# docker.io/goharbor/harbor-core:v2.10.2 + +name: harbor-core +summary: Rock containing Harbor Core components. +description: | + Packages the core service of Harbor. +license: Apache-2.0 + +version: "2.10.2" + +# NOTE(aznashwan): the base for the core image is VMware's Photon, +# but rockcraft only currently supports bare/ubuntu-based bases. +base: ubuntu@22.04 +build-base: ubuntu@22.04 +platforms: + amd64: + arm64: + + +services: + harbor_core: + startup: enabled + override: replace + + # NOTE(aznashwan) set entrypoint.sh for compatibility with upstream image. + # All it does is run `./make/photon/common/install_cert.sh` and exec `harbor_core`. + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/core/Dockerfile#L21 + command: /harbor/entrypoint.sh + + user: harbor + group: harbor + working-dir: /harbor + + # TODO(aznashwan): original Docker image includes Healthcheck should/can we also? 
+ # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/core/Dockerfile#L5 + + +parts: + create-harbor-user: + plugin: nil + overlay-script: | + groupadd -R $CRAFT_OVERLAY -r -g 10000 harbor + useradd -R $CRAFT_OVERLAY \ + --no-log-init -r -m -g 10000 -u 10000 harbor + + build-deps: + plugin: nil + build-snaps: + # https://github.com/goharbor/harbor/blob/v2.10.2/Makefile#L143 + - go/1.21/stable + build-packages: + - make + - curl + + # NOTE(aznashwan): lint_apis Makefile target should ideally + # be also run for added compatibility guarantees: + # https://github.com/goharbor/harbor/blob/f86f1cebc3a1af8c5c14c0a94d687fff04ebc6eb/Makefile#L289-L291 + # Sourced from: + # https://github.com/goharbor/harbor/blob/v2.10.2/Makefile#L309 + gen-apis: + plugin: nil + + # TODO(aznashwan): avoid cloning the repo twice. + source-type: git + source: https://github.com/goharbor/harbor + source-tag: v2.10.2 + source-depth: 1 + + build-environment: + # NOTE(aznashwan): the swagger binary needs a Go path pre-created. 
+ - CGO_ENABLED: 0 + - GOPATH: /root/go + + override-build: | + set -eux + + mkdir -p "$GOPATH" + + SWAGGER_VERSION=v0.25.0 + cd "$CRAFT_PART_SRC/src/core" + + curl -fsSL -o /usr/bin/swagger \ + https://github.com/go-swagger/go-swagger/releases/download/$SWAGGER_VERSION/swagger_linux_${CRAFT_ARCH_BUILD_FOR} + chmod +x /usr/bin/swagger + + /usr/bin/swagger generate server \ + --template-dir "$CRAFT_PART_SRC/tools/swagger/templates" --exclude-main \ + --additional-initialism=CVE --additional-initialism=GC --additional-initialism=OIDC \ + -f "$CRAFT_PART_SRC/api/v2.0/swagger.yaml" \ + --target "$CRAFT_PART_BUILD/src/server/v2.0" \ + -A harbor + + # Move them to the project dir for use in later building part: + cp -r "$CRAFT_PART_BUILD/src/server/v2.0" "$CRAFT_PROJECT_DIR/swagger-server-specs" + + # Sourced from: + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/core/Dockerfile.base + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/core/Dockerfile + image-prep: + after: [create-harbor-user] + plugin: nil + + source-type: git + source: https://github.com/goharbor/harbor + source-tag: v2.10.2 + source-depth: 1 + + stage-packages: + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/core/Dockerfile.base#L3 + - tzdata + + override-build: | + set -eux + cd $CRAFT_PART_SRC + + # Copy over auxiliary files: + OUTDIR="$CRAFT_PART_INSTALL/harbor" + mkdir -p "$OUTDIR" + + cp ./make/photon/common/install_cert.sh "$OUTDIR/" + cp ./make/photon/core/entrypoint.sh "$OUTDIR/" + + cp -r ./make/migrations "$OUTDIR/migrations" + cp -r ./src/core/views "$OUTDIR/views" + mkdir -p "$OUTDIR/icons" + cp -r ./icons "$OUTDIR/icons" + + mkdir -p "$CRAFT_PART_INSTALL/etc/pki/tls/certs" + chown -R 10000:10000 "$CRAFT_PART_INSTALL/etc/pki/tls/certs" + chown -R 10000:10000 "$OUTDIR/" + chmod u+x "$OUTDIR/entrypoint.sh" + chmod u+x "$OUTDIR/install_cert.sh" + + # NOTE(aznashwan): these values are harcoded in Harbor's Makefile. 
+ # The Trivy version used by Harbor will almost surely be different from + # the one used in the rockcraft workflows, so this is techinically a lie: + # canonical/k8s-workflows/.github/workflows/scan_images.yaml + MAKEFILE="$CRAFT_PART_SRC/Makefile" + VERSION_FILE="$CRAFT_PART_INSTALL/harbor/version" + echo "VERSION_TAG: v2.10.2" >> "$VERSION_FILE" + sed -E -n "s/^(REGISTRYVERSION=(.*))$/REGISTRY_VERSION: \2/p" "$MAKEFILE" >> "$VERSION_FILE" + sed -E -n "s/^(TRIVYVERSION=(.*))$/TRIVY_VERSION: \2/p" "$MAKEFILE" >> "$VERSION_FILE" + sed -E -n "s/^(TRIVYADAPTERVERSION=(.*))$/TRIVY_ADAPTER_VERSION: \2/p" "$MAKEFILE" >> "$VERSION_FILE" + + # Sourced from: https://github.com/goharbor/harbor/blob/v2.10.2/Makefile#L342 + build-harbor-core: + after: [create-harbor-user, build-deps, gen-apis, image-prep] + # NOTE(aznashwan): Harbor's Makefile relies on building through Docker, + # so we have to run the build commands manually: + plugin: go + + source-type: git + source: https://github.com/goharbor/harbor + source-tag: v2.10.2 + source-depth: 1 + source-subdir: src + + build-environment: + - CGO_ENABLED: 0 + - GOARCH: $CRAFT_ARCH_BUILD_FOR + + override-build: | + set -eux + + # Copy over swagger API gens from previous part: + mkdir -p "$CRAFT_PART_SRC/src/server/v2.0" + cp -r "$CRAFT_PROJECT_DIR/swagger-server-specs/"* "$CRAFT_PART_SRC/src/server/v2.0/" + + # Deduce ldflags: + GIT_TAG="v2.10.2" + GIT_COMMIT_ID=`git -C "$CRAFT_PART_SRC" log --pretty=tformat:"%h" -n1` + + # Build binary: + cd "$CRAFT_PART_SRC/src/core" + go build \ + -ldflags="-w -s -X github.com/goharbor/harbor/src/pkg/version.GitCommit=$GIT_COMMIT_ID -X github.com/goharbor/harbor/src/pkg/version.ReleaseVersion=$GIT_TAG" \ + -o "$CRAFT_PART_BUILD/harbor_core" + + # Copy over binary and set appropriate permissions: + mkdir -p $CRAFT_PART_INSTALL/harbor + cp $CRAFT_PART_BUILD/harbor_core $CRAFT_PART_INSTALL/harbor + + chown 10000:10000 "$CRAFT_PART_INSTALL/harbor/harbor_core" + chmod u+x 
"$CRAFT_PART_INSTALL/harbor/harbor_core" + diff --git a/v2.10.2/harbor-db/README.md b/v2.10.2/harbor-db/README.md new file mode 100644 index 0000000..3cb8a5d --- /dev/null +++ b/v2.10.2/harbor-db/README.md @@ -0,0 +1,3 @@ +# ROCK specs for harbor-db. + +Aims to be compatible with `docker.io/goharbor/harbor-db`. diff --git a/v2.10.2/harbor-db/rockcraft.yaml b/v2.10.2/harbor-db/rockcraft.yaml new file mode 100644 index 0000000..6965afc --- /dev/null +++ b/v2.10.2/harbor-db/rockcraft.yaml @@ -0,0 +1,177 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +# Rockcraft definition for Harbor db image, which is basically a +# re-packaged PostgreSQL with some added configs/setup files. +# docker.io/goharbor/harbor-db:v2.10.2 + +name: harbor-db +summary: Rock containing Harbor DB PostgreSQL component. +description: | + Packages the PostgreSQL DB of Harbor. +license: Apache-2.0 + +version: "2.10.2" + +# NOTE(aznashwan): the base for the DB image is VMware's Photon, +# but rockcraft only currently supports bare/ubuntu-based bases. 
+base: ubuntu@22.04 +build-base: ubuntu@22.04 +platforms: + amd64: + arm64: + + +# NOTE(aznashwan): The PhotonOS PostgreSQL package releases are +# simply built from the upstream sources from postgresql.org: +# https://github.com/vmware/photon/blob/5.0/SPECS/postgresql/postgresql13.spec#L23 +# To avoid needlessly building the packages ourselves, we simply +# add the upstream posgresql.org deb repos to the build host: +package-repositories: + - type: apt + components: [main] + suites: [jammy-pgdg] + key-id: B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 + url: http://apt.postgresql.org/pub/repos/apt + priority: always + + +environment: + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/db/Dockerfile.base#L3 + PGDATA: /var/lib/postgresql/data + + +services: + + # HACK(aznashwan): the upstream image's entrypoint calls `initdb.sh`, + # which in turn calls postgres' `initdb` with a hardcoded en_US.UTF-8 locale: + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/db/initdb.sh#L34 + # Because we can't run any of the commands from the `locales` package within + # the primed filesystem's context (they'll just change locale settings on + # the build host), and locale-related files are apparently excluded + # from the final image (1), we have to set this up as its own service: + # (1): https://github.com/canonical/craft-parts/blob/1.33.0/craft_parts/packages/deb.py#L140 + locale_setup: + startup: enabled + override: replace + command: bash -c "apt-get update && apt-get install -y language-pack-en-base" + on-success: ignore + + harbor_db: + startup: enabled + override: replace + requires: + - locale_setup + + # NOTE(aznashwan) set docker-entrypoint.sh for compatibility with upstream image. 
+ # It takes as arguments the older and current ProstgreSQL versions to start: + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/db/Dockerfile#L15 + command: /docker-entrypoint.sh 13 14 + + user: postgres + group: postgres + + # TODO(aznashwan): original Docker image includes Healthcheck should/can we also? + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/db/Dockerfile#L16 + + +parts: + create-postgres-user: + plugin: nil + overlay-script: | + groupadd -R $CRAFT_OVERLAY -r postgres --gid=999 + useradd -R $CRAFT_OVERLAY -m -r -g postgres --uid=999 postgres + + # Sourced from: + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/db/Dockerfile.base + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/db/Dockerfile + setup-harbor-db: + after: [create-postgres-user] + plugin: nil + + source-type: git + source: https://github.com/goharbor/harbor + source-tag: v2.10.2 + source-depth: 1 + + build-environment: + - PGDATA: /var/lib/postgresql/data + + stage-packages: + # NOTE(aznashwan): upstream image installs both Postgres 13 and 14: + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/db/Dockerfile.base#L9-L10 + - postgresql-13 + - postgresql-14 + - findutils + - bc + - util-linux + - net-tools + # HACK(aznashwan): the upstream image's entrypoint calls `initdb.sh`, + # which in turn calls postgres' `initdb` with a hardcoded en_US.UTF-8 locale: + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/db/initdb.sh#L34 + # Because we can't run any of the commands from the `locales` package within + # the primed filesystem's context (they'll just change locale settings on + # the build host), we simply stage this umbrella package for English: + # + # Doesn't seem to work due to `locales` being explicitly excluded: + # https://github.com/canonical/craft-parts/blob/1.33.0/craft_parts/packages/deb.py#L140 + # - locales + # - language-pack-en-base + + override-build: | + set -eux + + # Setup Postgres files: + 
mkdir -p "$CRAFT_PART_INSTALL/run/postgresql" + chown -R 999:999 "$CRAFT_PART_INSTALL/run/postgresql" + chmod 2777 "$CRAFT_PART_INSTALL/run/postgresql" + + mkdir -p "$CRAFT_PART_INSTALL/$PGDATA" + chown -R 999:999 "$CRAFT_PART_INSTALL/$PGDATA" + chmod 777 "$CRAFT_PART_INSTALL/$PGDATA" + + sed -i "s|#listen_addresses = 'localhost'.*|listen_addresses = '*'|g" \ + "$CRAFT_PART_INSTALL/usr/share/postgresql/14/postgresql.conf.sample" + sed -i "s|#unix_socket_directories = '/tmp'.*|unix_socket_directories = '/run/postgresql'|g" \ + "$CRAFT_PART_INSTALL/usr/share/postgresql/14/postgresql.conf.sample" + + # Copy over auxiliary files: + cd "$CRAFT_PART_SRC" + OUTDIR="$CRAFT_PART_INSTALL" + + cp ./make/photon/db/docker-entrypoint.sh "$OUTDIR/docker-entrypoint.sh" + chown 999:999 "$OUTDIR/docker-entrypoint.sh" + chmod u+x "$OUTDIR/docker-entrypoint.sh" + + cp ./make/photon/db/docker-healthcheck.sh "$OUTDIR/docker-healthcheck.sh" + chown 999:999 "$OUTDIR/docker-healthcheck.sh" + chmod u+x "$OUTDIR/docker-healthcheck.sh" + + cp ./make/photon/db/initdb.sh "$OUTDIR/initdb.sh" + chown 999:999 "$OUTDIR/initdb.sh" + # NOTE(aznashwan): initdb.sh is NOT chmod'd for some reason... + + cp ./make/photon/db/upgrade.sh "$OUTDIR/upgrade.sh" + # NOTE(aznashwan): upgrade.sh is NOT chown'd or chmod'd for some reason... 
+ + mkdir -p "$OUTDIR/docker-entrypoint-initdb.d" + chown -R 999:999 "$OUTDIR/docker-entrypoint-initdb.d" + + cp ./make/photon/db/initial-registry.sql \ + "$OUTDIR/docker-entrypoint-initdb.d/initial-registry.sql" + cp ./make/photon/db/upgrade.sh "$OUTDIR/" + + # HACK(aznashwan): the upstream harbor-db image sets up + # update-alternatives for initdb and a handful of other + # commands bundled with PostgreSQL, which will use the + # versions from postgresql-14, so we simply symlink them: + # https://github.com/vmware/photon/blob/5.0/SPECS/postgresql/postgresql14.spec#L374-L387 + for full_path in "$CRAFT_PART_INSTALL/usr/lib/postgresql/14/bin/"*; do + exe=$(basename $full_path) + if [ ! -f "$CRAFT_PART_INSTALL/usr/bin/$exe" ]; then + ln -s \ + "/usr/lib/postgresql/14/bin/$exe" \ + "$CRAFT_PART_INSTALL/usr/bin/$exe" + fi + done + diff --git a/v2.10.2/harbor-exporter/README.md b/v2.10.2/harbor-exporter/README.md new file mode 100644 index 0000000..6bf2f8b --- /dev/null +++ b/v2.10.2/harbor-exporter/README.md @@ -0,0 +1,3 @@ +# ROCK specs for harbor-exporter. + +Aims to be compatible with `docker.io/goharbor/harbor-exporter:v2.10.2`. diff --git a/v2.10.2/harbor-exporter/rockcraft.yaml b/v2.10.2/harbor-exporter/rockcraft.yaml new file mode 100644 index 0000000..7af2e09 --- /dev/null +++ b/v2.10.2/harbor-exporter/rockcraft.yaml @@ -0,0 +1,129 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +# Rockcraft definition for Harbor eporter image: +# docker.io/goharbor/harbor-exporter:v2.10.2 + +name: harbor-exporter +summary: Rock containing Harbor Exporter component. +description: | + Packages the Expoter of Harbor. +license: Apache-2.0 + +version: "2.10.2" + +# NOTE(aznashwan): the base for the expoter image is VMware's Photon, +# but rockcraft only currently supports bare/ubuntu-based bases. 
+base: ubuntu@22.04 +build-base: ubuntu@22.04 +platforms: + amd64: + arm64: + + +services: + harbor_expoter: + startup: enabled + override: replace + + # NOTE(aznashwan) set exntrypoint.sh for compatibility with upstream image. + # All it does is run `./make/photon/common/install_cert.sh` and exec `harbor_expoter`. + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/exporter/Dockerfile#L29 + command: /harbor/entrypoint.sh + + user: harbor + group: harbor + working-dir: /harbor + + +parts: + create-harbor-user: + plugin: nil + overlay-script: | + groupadd -R $CRAFT_OVERLAY -r -g 10000 harbor + useradd -R $CRAFT_OVERLAY \ + --no-log-init -r -m -g 10000 -u 10000 harbor + + build-deps: + plugin: nil + build-snaps: + # https://github.com/goharbor/harbor/blob/v2.10.2/Makefile#L143 + - go/1.21/stable + build-packages: + - make + + # Sourced from: + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/exporter/Dockerfile.base + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/exporter/Dockerfile + image-prep: + after: [create-harbor-user] + plugin: nil + + source-type: git + source: https://github.com/goharbor/harbor + source-tag: v2.10.2 + source-depth: 1 + + stage-packages: + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/exporter/Dockerfile.base#L3 + - tzdata + + override-build: | + set -eux + cd $CRAFT_PART_SRC + + # Copy over auxiliary files: + OUTDIR="$CRAFT_PART_INSTALL/harbor" + mkdir -p "$OUTDIR" + + cp ./make/photon/common/install_cert.sh "$OUTDIR/" + cp ./make/photon/exporter/entrypoint.sh "$OUTDIR/" + + mkdir -p "$CRAFT_PART_INSTALL/etc/pki/tls/certs" + chown -R 10000:10000 "$CRAFT_PART_INSTALL/etc/pki/tls/certs" + chown -R 10000:10000 "$OUTDIR/" + chmod u+x "$OUTDIR/entrypoint.sh" + chmod u+x "$OUTDIR/install_cert.sh" + + # Sourced from: https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/exporter/Dockerfile + build-harbor-exporter: + after: [create-harbor-user, build-deps, image-prep] + # 
NOTE(aznashwan): Harbor's Makefile relies on building through Docker, + # so we have to run the build commands manually: + plugin: go + + source-type: git + source: https://github.com/goharbor/harbor + source-tag: v2.10.2 + source-depth: 1 + source-subdir: src + + build-environment: + - GOOS: linux + - CGO_ENABLED: 0 + - GOARCH: $CRAFT_ARCH_BUILD_FOR + + override-build: | + set -eux + + # Deduce ldflags: + # NOTE(aznashwan): the exporter binary bears no build tags, but we add + # them anyway for consistency with the building of the core binaries: + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/exporter/Dockerfile#L13 + # https://github.com/goharbor/harbor/blob/v2.10.2/Makefile#L153 + GIT_TAG="v2.10.2" + GIT_COMMIT_ID=`git -C "$CRAFT_PART_SRC" log --pretty=tformat:"%h" -n1` + + # Build binary: + cd "$CRAFT_PART_SRC/src/cmd/exporter" + go build \ + -ldflags="-w -s -X github.com/goharbor/harbor/src/pkg/version.GitCommit=$GIT_COMMIT_ID -X github.com/goharbor/harbor/src/pkg/version.ReleaseVersion=$GIT_TAG" \ + -o "$CRAFT_PART_BUILD/harbor_exporter" + + # Copy over binary and set appropriate permissions: + mkdir -p $CRAFT_PART_INSTALL/harbor + cp $CRAFT_PART_BUILD/harbor_exporter $CRAFT_PART_INSTALL/harbor + + chown 10000:10000 "$CRAFT_PART_INSTALL/harbor/harbor_exporter" + chmod u+x "$CRAFT_PART_INSTALL/harbor/harbor_exporter" + diff --git a/v2.10.2/harbor-jobservice/README.md b/v2.10.2/harbor-jobservice/README.md new file mode 100644 index 0000000..3aa44e1 --- /dev/null +++ b/v2.10.2/harbor-jobservice/README.md @@ -0,0 +1,3 @@ +# ROCK specs for harbor-jobservice. + +Aims to be compatible with `docker.io/goharbor/harbor-jobservice`. diff --git a/v2.10.2/harbor-jobservice/rockcraft.yaml b/v2.10.2/harbor-jobservice/rockcraft.yaml new file mode 100644 index 0000000..f3f6cea --- /dev/null +++ b/v2.10.2/harbor-jobservice/rockcraft.yaml @@ -0,0 +1,127 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +# Rockcraft definition for Harbor jobservice image: +# docker.io/goharbor/harbor-jobservice:v2.10.2 + +name: harbor-jobservice +summary: Rock containing Harbor Jobservice component. +description: | + Packages the Jobservice of Harbor. +license: Apache-2.0 + +version: "2.10.2" + +# NOTE(aznashwan): the base for the jobservice image is VMware's Photon, +# but rockcraft only currently supports bare/ubuntu-based bases. +base: ubuntu@22.04 +build-base: ubuntu@22.04 +platforms: + amd64: + arm64: + + +services: + harbor_jobservice: + startup: enabled + override: replace + + # NOTE(aznashwan) set entrypoint.sh for compatibility with upstream image. + # All it does is run `./make/photon/common/install_cert.sh` and exec `harbor_jobservice`. + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/jobservice/Dockerfile#L24 + command: /harbor/entrypoint.sh + + user: harbor + group: harbor + working-dir: /harbor + + # TODO(aznashwan): original Docker image includes Healthcheck should/can we also? 
+ # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/jobservice/Dockerfile#L22 + + +parts: + create-harbor-user: + plugin: nil + overlay-script: | + groupadd -R $CRAFT_OVERLAY -r -g 10000 harbor + useradd -R $CRAFT_OVERLAY \ + --no-log-init -r -m -g 10000 -u 10000 harbor + + build-deps: + plugin: nil + build-snaps: + # https://github.com/goharbor/harbor/blob/v2.10.2/Makefile#L143 + - go/1.21/stable + build-packages: + - make + + # Sourced from: + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/jobservice/Dockerfile.base + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/jobservice/Dockerfile + image-prep: + after: [create-harbor-user] + plugin: nil + + source-type: git + source: https://github.com/goharbor/harbor + source-tag: v2.10.2 + source-depth: 1 + + stage-packages: + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/jobservice/Dockerfile.base#L3 + - tzdata + + override-build: | + set -eux + cd $CRAFT_PART_SRC + + # Copy over auxiliary files: + OUTDIR="$CRAFT_PART_INSTALL/harbor" + mkdir -p "$OUTDIR" + + cp ./make/photon/common/install_cert.sh "$OUTDIR/" + cp ./make/photon/jobservice/entrypoint.sh "$OUTDIR/" + + mkdir -p "$CRAFT_PART_INSTALL/etc/pki/tls/certs" + chown -R 10000:10000 "$CRAFT_PART_INSTALL/etc/pki/tls/certs" + chown -R 10000:10000 "$OUTDIR/" + chmod u+x "$OUTDIR/entrypoint.sh" + chmod u+x "$OUTDIR/install_cert.sh" + + # Sourced from: https://github.com/goharbor/harbor/blob/v2.10.2/Makefile#L347 + build-harbor-jobservice: + after: [create-harbor-user, build-deps, image-prep] + # NOTE(aznashwan): Harbor's Makefile relies on building through Docker, + # so we have to run the build commands manually: + plugin: go + + source-type: git + source: https://github.com/goharbor/harbor + source-tag: v2.10.2 + source-depth: 1 + source-subdir: src + + build-environment: + - CGO_ENABLED: 0 + - GOARCH: $CRAFT_ARCH_BUILD_FOR + + override-build: | + set -eux + + # Deduce ldflags: + GIT_TAG="v2.10.2" + 
GIT_COMMIT_ID=`git -C "$CRAFT_PART_SRC" log --pretty=tformat:"%h" -n1` + + # Build binary: + cd "$CRAFT_PART_SRC/src/jobservice" + go build \ + -ldflags="-w -s -X github.com/goharbor/harbor/src/pkg/version.GitCommit=$GIT_COMMIT_ID -X github.com/goharbor/harbor/src/pkg/version.ReleaseVersion=$GIT_TAG" \ + -o "$CRAFT_PART_BUILD/harbor_jobservice" + + # Copy over binary and set appropriate permissions: + mkdir -p $CRAFT_PART_INSTALL/harbor + cp $CRAFT_PART_BUILD/harbor_jobservice $CRAFT_PART_INSTALL/harbor + + chown 10000:10000 "$CRAFT_PART_INSTALL/harbor/harbor_jobservice" + chmod u+x "$CRAFT_PART_INSTALL/harbor/harbor_jobservice" + diff --git a/v2.10.2/harbor-registryctl/README.md b/v2.10.2/harbor-registryctl/README.md new file mode 100644 index 0000000..ca33e10 --- /dev/null +++ b/v2.10.2/harbor-registryctl/README.md @@ -0,0 +1,3 @@ +# ROCK specs for harbor-registryctl. + +Aims to be compatible with `docker.io/goharbor/harbor-registryctl`. diff --git a/v2.10.2/harbor-registryctl/redis.patch b/v2.10.2/harbor-registryctl/redis.patch new file mode 100644 index 0000000..ab4649c --- /dev/null +++ b/v2.10.2/harbor-registryctl/redis.patch @@ -0,0 +1,883 @@ +diff --git a/configuration/configuration.go b/configuration/configuration.go +index 7076df85d4..3e74330321 100644 +--- a/configuration/configuration.go ++++ b/configuration/configuration.go +@@ -168,6 +168,9 @@ type Configuration struct { + // Addr specifies the the redis instance available to the application. + Addr string `yaml:"addr,omitempty"` + ++ // SentinelMasterSet specifies the the redis sentinel master set name. ++ SentinelMasterSet string `yaml:"sentinelMasterSet,omitempty"` ++ + // Password string to use when making a connection. 
+ Password string `yaml:"password,omitempty"` + +diff --git a/registry/handlers/app.go b/registry/handlers/app.go +index bf56cea22a..4a7cee9a2e 100644 +--- a/registry/handlers/app.go ++++ b/registry/handlers/app.go +@@ -3,6 +3,7 @@ package handlers + import ( + "context" + "crypto/rand" ++ "errors" + "expvar" + "fmt" + "math" +@@ -16,6 +17,7 @@ import ( + "strings" + "time" + ++ "github.com/FZambia/sentinel" + "github.com/distribution/reference" + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" +@@ -499,6 +501,45 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { + return + } + ++ var getRedisAddr func() (string, error) ++ var testOnBorrow func(c redis.Conn, t time.Time) error ++ if configuration.Redis.SentinelMasterSet != "" { ++ sntnl := &sentinel.Sentinel{ ++ Addrs: strings.Split(configuration.Redis.Addr, ","), ++ MasterName: configuration.Redis.SentinelMasterSet, ++ Dial: func(addr string) (redis.Conn, error) { ++ c, err := redis.DialTimeout("tcp", addr, ++ configuration.Redis.DialTimeout, ++ configuration.Redis.ReadTimeout, ++ configuration.Redis.WriteTimeout) ++ if err != nil { ++ return nil, err ++ } ++ return c, nil ++ }, ++ } ++ getRedisAddr = func() (string, error) { ++ return sntnl.MasterAddr() ++ } ++ testOnBorrow = func(c redis.Conn, t time.Time) error { ++ if !sentinel.TestRole(c, "master") { ++ return errors.New("role check failed") ++ } ++ return nil ++ } ++ ++ } else { ++ getRedisAddr = func() (string, error) { ++ return configuration.Redis.Addr, nil ++ } ++ testOnBorrow = func(c redis.Conn, t time.Time) error { ++ // TODO(stevvooe): We can probably do something more interesting ++ // here with the health package. ++ _, err := c.Do("PING") ++ return err ++ } ++ } ++ + pool := &redis.Pool{ + Dial: func() (redis.Conn, error) { + // TODO(stevvooe): Yet another use case for contextual timing. 
+@@ -514,8 +555,11 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { + } + } + +- conn, err := redis.DialTimeout("tcp", +- configuration.Redis.Addr, ++ redisAddr, err := getRedisAddr() ++ if err != nil { ++ return nil, err ++ } ++ conn, err := redis.DialTimeout("tcp", redisAddr, + configuration.Redis.DialTimeout, + configuration.Redis.ReadTimeout, + configuration.Redis.WriteTimeout) +@@ -547,16 +591,11 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { + done(nil) + return conn, nil + }, +- MaxIdle: configuration.Redis.Pool.MaxIdle, +- MaxActive: configuration.Redis.Pool.MaxActive, +- IdleTimeout: configuration.Redis.Pool.IdleTimeout, +- TestOnBorrow: func(c redis.Conn, t time.Time) error { +- // TODO(stevvooe): We can probably do something more interesting +- // here with the health package. +- _, err := c.Do("PING") +- return err +- }, +- Wait: false, // if a connection is not available, proceed without cache. ++ MaxIdle: configuration.Redis.Pool.MaxIdle, ++ MaxActive: configuration.Redis.Pool.MaxActive, ++ IdleTimeout: configuration.Redis.Pool.IdleTimeout, ++ TestOnBorrow: testOnBorrow, ++ Wait: false, // if a connection is not available, proceed without cache. + } + + app.redis = pool +diff --git a/registry/handlers/app_test.go b/registry/handlers/app_test.go +index 60a57e6c15..8a644d83d8 100644 +--- a/registry/handlers/app_test.go ++++ b/registry/handlers/app_test.go +@@ -140,7 +140,29 @@ func TestAppDispatcher(t *testing.T) { + // TestNewApp covers the creation of an application via NewApp with a + // configuration. 
+ func TestNewApp(t *testing.T) { +- ctx := context.Background() ++ ++ config := configuration.Configuration{ ++ Storage: configuration.Storage{ ++ "testdriver": nil, ++ "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ ++ "enabled": false, ++ }}, ++ }, ++ Auth: configuration.Auth{ ++ // For now, we simply test that new auth results in a viable ++ // application. ++ "silly": { ++ "realm": "realm-test", ++ "service": "service-test", ++ }, ++ }, ++ } ++ runAppWithConfig(t, config) ++} ++ ++// TestNewApp covers the creation of an application via NewApp with a ++// configuration(with redis). ++func TestNewAppWithRedis(t *testing.T) { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": nil, +@@ -157,7 +179,38 @@ func TestNewApp(t *testing.T) { + }, + }, + } ++ config.Redis.Addr = "127.0.0.1:6379" ++ config.Redis.DB = 0 ++ runAppWithConfig(t, config) ++} + ++// TestNewApp covers the creation of an application via NewApp with a ++// configuration(with redis sentinel cluster). ++func TestNewAppWithRedisSentinelCluster(t *testing.T) { ++ config := configuration.Configuration{ ++ Storage: configuration.Storage{ ++ "testdriver": nil, ++ "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ ++ "enabled": false, ++ }}, ++ }, ++ Auth: configuration.Auth{ ++ // For now, we simply test that new auth results in a viable ++ // application. ++ "silly": { ++ "realm": "realm-test", ++ "service": "service-test", ++ }, ++ }, ++ } ++ config.Redis.Addr = "192.168.0.11:26379,192.168.0.12:26379" ++ config.Redis.DB = 0 ++ config.Redis.SentinelMasterSet = "mymaster" ++ runAppWithConfig(t, config) ++} ++ ++func runAppWithConfig(t *testing.T, config configuration.Configuration) { ++ ctx := context.Background() + // Mostly, with this test, given a sane configuration, we are simply + // ensuring that NewApp doesn't panic. We might want to tweak this + // behavior. 
+diff --git a/vendor.conf b/vendor.conf +index 33fe616b76..a8d8f58bc6 100644 +--- a/vendor.conf ++++ b/vendor.conf +@@ -51,3 +51,4 @@ gopkg.in/yaml.v2 v2.2.1 + rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git + github.com/opencontainers/go-digest ea51bea511f75cfa3ef6098cc253c5c3609b037a # v1.0.0 + github.com/opencontainers/image-spec 67d2d5658fe0476ab9bf414cec164077ebff3920 # v1.0.2 ++github.com/FZambia/sentinel 5585739eb4b6478aa30161866ccf9ce0ef5847c7 https://github.com/jeremyxu2010/sentinel.git +diff --git a/vendor/github.com/FZambia/sentinel/LICENSE b/vendor/github.com/FZambia/sentinel/LICENSE +new file mode 100644 +index 0000000000..8dada3edaf +--- /dev/null ++++ b/vendor/github.com/FZambia/sentinel/LICENSE +@@ -0,0 +1,201 @@ ++ Apache License ++ Version 2.0, January 2004 ++ http://www.apache.org/licenses/ ++ ++ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION ++ ++ 1. Definitions. ++ ++ "License" shall mean the terms and conditions for use, reproduction, ++ and distribution as defined by Sections 1 through 9 of this document. ++ ++ "Licensor" shall mean the copyright owner or entity authorized by ++ the copyright owner that is granting the License. ++ ++ "Legal Entity" shall mean the union of the acting entity and all ++ other entities that control, are controlled by, or are under common ++ control with that entity. For the purposes of this definition, ++ "control" means (i) the power, direct or indirect, to cause the ++ direction or management of such entity, whether by contract or ++ otherwise, or (ii) ownership of fifty percent (50%) or more of the ++ outstanding shares, or (iii) beneficial ownership of such entity. ++ ++ "You" (or "Your") shall mean an individual or Legal Entity ++ exercising permissions granted by this License. 
++ ++ "Source" form shall mean the preferred form for making modifications, ++ including but not limited to software source code, documentation ++ source, and configuration files. ++ ++ "Object" form shall mean any form resulting from mechanical ++ transformation or translation of a Source form, including but ++ not limited to compiled object code, generated documentation, ++ and conversions to other media types. ++ ++ "Work" shall mean the work of authorship, whether in Source or ++ Object form, made available under the License, as indicated by a ++ copyright notice that is included in or attached to the work ++ (an example is provided in the Appendix below). ++ ++ "Derivative Works" shall mean any work, whether in Source or Object ++ form, that is based on (or derived from) the Work and for which the ++ editorial revisions, annotations, elaborations, or other modifications ++ represent, as a whole, an original work of authorship. For the purposes ++ of this License, Derivative Works shall not include works that remain ++ separable from, or merely link (or bind by name) to the interfaces of, ++ the Work and Derivative Works thereof. ++ ++ "Contribution" shall mean any work of authorship, including ++ the original version of the Work and any modifications or additions ++ to that Work or Derivative Works thereof, that is intentionally ++ submitted to Licensor for inclusion in the Work by the copyright owner ++ or by an individual or Legal Entity authorized to submit on behalf of ++ the copyright owner. 
For the purposes of this definition, "submitted" ++ means any form of electronic, verbal, or written communication sent ++ to the Licensor or its representatives, including but not limited to ++ communication on electronic mailing lists, source code control systems, ++ and issue tracking systems that are managed by, or on behalf of, the ++ Licensor for the purpose of discussing and improving the Work, but ++ excluding communication that is conspicuously marked or otherwise ++ designated in writing by the copyright owner as "Not a Contribution." ++ ++ "Contributor" shall mean Licensor and any individual or Legal Entity ++ on behalf of whom a Contribution has been received by Licensor and ++ subsequently incorporated within the Work. ++ ++ 2. Grant of Copyright License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ copyright license to reproduce, prepare Derivative Works of, ++ publicly display, publicly perform, sublicense, and distribute the ++ Work and such Derivative Works in Source or Object form. ++ ++ 3. Grant of Patent License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ (except as stated in this section) patent license to make, have made, ++ use, offer to sell, sell, import, and otherwise transfer the Work, ++ where such license applies only to those patent claims licensable ++ by such Contributor that are necessarily infringed by their ++ Contribution(s) alone or by combination of their Contribution(s) ++ with the Work to which such Contribution(s) was submitted. 
If You ++ institute patent litigation against any entity (including a ++ cross-claim or counterclaim in a lawsuit) alleging that the Work ++ or a Contribution incorporated within the Work constitutes direct ++ or contributory patent infringement, then any patent licenses ++ granted to You under this License for that Work shall terminate ++ as of the date such litigation is filed. ++ ++ 4. Redistribution. You may reproduce and distribute copies of the ++ Work or Derivative Works thereof in any medium, with or without ++ modifications, and in Source or Object form, provided that You ++ meet the following conditions: ++ ++ (a) You must give any other recipients of the Work or ++ Derivative Works a copy of this License; and ++ ++ (b) You must cause any modified files to carry prominent notices ++ stating that You changed the files; and ++ ++ (c) You must retain, in the Source form of any Derivative Works ++ that You distribute, all copyright, patent, trademark, and ++ attribution notices from the Source form of the Work, ++ excluding those notices that do not pertain to any part of ++ the Derivative Works; and ++ ++ (d) If the Work includes a "NOTICE" text file as part of its ++ distribution, then any Derivative Works that You distribute must ++ include a readable copy of the attribution notices contained ++ within such NOTICE file, excluding those notices that do not ++ pertain to any part of the Derivative Works, in at least one ++ of the following places: within a NOTICE text file distributed ++ as part of the Derivative Works; within the Source form or ++ documentation, if provided along with the Derivative Works; or, ++ within a display generated by the Derivative Works, if and ++ wherever such third-party notices normally appear. The contents ++ of the NOTICE file are for informational purposes only and ++ do not modify the License. 
You may add Your own attribution ++ notices within Derivative Works that You distribute, alongside ++ or as an addendum to the NOTICE text from the Work, provided ++ that such additional attribution notices cannot be construed ++ as modifying the License. ++ ++ You may add Your own copyright statement to Your modifications and ++ may provide additional or different license terms and conditions ++ for use, reproduction, or distribution of Your modifications, or ++ for any such Derivative Works as a whole, provided Your use, ++ reproduction, and distribution of the Work otherwise complies with ++ the conditions stated in this License. ++ ++ 5. Submission of Contributions. Unless You explicitly state otherwise, ++ any Contribution intentionally submitted for inclusion in the Work ++ by You to the Licensor shall be under the terms and conditions of ++ this License, without any additional terms or conditions. ++ Notwithstanding the above, nothing herein shall supersede or modify ++ the terms of any separate license agreement you may have executed ++ with Licensor regarding such Contributions. ++ ++ 6. Trademarks. This License does not grant permission to use the trade ++ names, trademarks, service marks, or product names of the Licensor, ++ except as required for reasonable and customary use in describing the ++ origin of the Work and reproducing the content of the NOTICE file. ++ ++ 7. Disclaimer of Warranty. Unless required by applicable law or ++ agreed to in writing, Licensor provides the Work (and each ++ Contributor provides its Contributions) on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or ++ implied, including, without limitation, any warranties or conditions ++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A ++ PARTICULAR PURPOSE. 
You are solely responsible for determining the ++ appropriateness of using or redistributing the Work and assume any ++ risks associated with Your exercise of permissions under this License. ++ ++ 8. Limitation of Liability. In no event and under no legal theory, ++ whether in tort (including negligence), contract, or otherwise, ++ unless required by applicable law (such as deliberate and grossly ++ negligent acts) or agreed to in writing, shall any Contributor be ++ liable to You for damages, including any direct, indirect, special, ++ incidental, or consequential damages of any character arising as a ++ result of this License or out of the use or inability to use the ++ Work (including but not limited to damages for loss of goodwill, ++ work stoppage, computer failure or malfunction, or any and all ++ other commercial damages or losses), even if such Contributor ++ has been advised of the possibility of such damages. ++ ++ 9. Accepting Warranty or Additional Liability. While redistributing ++ the Work or Derivative Works thereof, You may choose to offer, ++ and charge a fee for, acceptance of support, warranty, indemnity, ++ or other liability obligations and/or rights consistent with this ++ License. However, in accepting such obligations, You may act only ++ on Your own behalf and on Your sole responsibility, not on behalf ++ of any other Contributor, and only if You agree to indemnify, ++ defend, and hold each Contributor harmless for any liability ++ incurred by, or claims asserted against, such Contributor by reason ++ of your accepting any such warranty or additional liability. ++ ++ END OF TERMS AND CONDITIONS ++ ++ APPENDIX: How to apply the Apache License to your work. ++ ++ To apply the Apache License to your work, attach the following ++ boilerplate notice, with the fields enclosed by brackets "{}" ++ replaced with your own identifying information. (Don't include ++ the brackets!) 
The text should be enclosed in the appropriate ++ comment syntax for the file format. We also recommend that a ++ file or class name and description of purpose be included on the ++ same "printed page" as the copyright notice for easier ++ identification within third-party archives. ++ ++ Copyright {yyyy} {name of copyright owner} ++ ++ Licensed under the Apache License, Version 2.0 (the "License"); ++ you may not use this file except in compliance with the License. ++ You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++ Unless required by applicable law or agreed to in writing, software ++ distributed under the License is distributed on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ See the License for the specific language governing permissions and ++ limitations under the License. +diff --git a/vendor/github.com/FZambia/sentinel/README.md b/vendor/github.com/FZambia/sentinel/README.md +new file mode 100644 +index 0000000000..f544c54ef6 +--- /dev/null ++++ b/vendor/github.com/FZambia/sentinel/README.md +@@ -0,0 +1,39 @@ ++go-sentinel ++=========== ++ ++Redis Sentinel support for [redigo](https://github.com/gomodule/redigo) library. 
++ ++Documentation ++------------- ++ ++- [API Reference](http://godoc.org/github.com/FZambia/sentinel) ++ ++Alternative solution ++-------------------- ++ ++You can alternatively configure Haproxy between your application and Redis to proxy requests to Redis master instance if you only need HA: ++ ++``` ++listen redis ++ server redis-01 127.0.0.1:6380 check port 6380 check inter 2s weight 1 inter 2s downinter 5s rise 10 fall 2 ++ server redis-02 127.0.0.1:6381 check port 6381 check inter 2s weight 1 inter 2s downinter 5s rise 10 fall 2 backup ++ bind *:6379 ++ mode tcp ++ option tcpka ++ option tcplog ++ option tcp-check ++ tcp-check send PING\r\n ++ tcp-check expect string +PONG ++ tcp-check send info\ replication\r\n ++ tcp-check expect string role:master ++ tcp-check send QUIT\r\n ++ tcp-check expect string +OK ++ balance roundrobin ++``` ++ ++This way you don't need to use this library. ++ ++License ++------- ++ ++Library is available under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.html). +diff --git a/vendor/github.com/FZambia/sentinel/sentinel.go b/vendor/github.com/FZambia/sentinel/sentinel.go +new file mode 100644 +index 0000000000..79209e9f0d +--- /dev/null ++++ b/vendor/github.com/FZambia/sentinel/sentinel.go +@@ -0,0 +1,426 @@ ++package sentinel ++ ++import ( ++ "errors" ++ "fmt" ++ "net" ++ "strings" ++ "sync" ++ "time" ++ ++ "github.com/garyburd/redigo/redis" ++) ++ ++// Sentinel provides a way to add high availability (HA) to Redis Pool using ++// preconfigured addresses of Sentinel servers and name of master which Sentinels ++// monitor. It works with Redis >= 2.8.12 (mostly because of ROLE command that ++// was introduced in that version, it's possible though to support old versions ++// using INFO command). 
++// ++// Example of the simplest usage to contact master "mymaster": ++// ++// func newSentinelPool() *redis.Pool { ++// sntnl := &sentinel.Sentinel{ ++// Addrs: []string{":26379", ":26380", ":26381"}, ++// MasterName: "mymaster", ++// Dial: func(addr string) (redis.Conn, error) { ++// timeout := 500 * time.Millisecond ++// c, err := redis.DialTimeout("tcp", addr, timeout, timeout, timeout) ++// if err != nil { ++// return nil, err ++// } ++// return c, nil ++// }, ++// } ++// return &redis.Pool{ ++// MaxIdle: 3, ++// MaxActive: 64, ++// Wait: true, ++// IdleTimeout: 240 * time.Second, ++// Dial: func() (redis.Conn, error) { ++// masterAddr, err := sntnl.MasterAddr() ++// if err != nil { ++// return nil, err ++// } ++// c, err := redis.Dial("tcp", masterAddr) ++// if err != nil { ++// return nil, err ++// } ++// return c, nil ++// }, ++// TestOnBorrow: func(c redis.Conn, t time.Time) error { ++// if !sentinel.TestRole(c, "master") { ++// return errors.New("Role check failed") ++// } else { ++// return nil ++// } ++// }, ++// } ++// } ++type Sentinel struct { ++ // Addrs is a slice with known Sentinel addresses. ++ Addrs []string ++ ++ // MasterName is a name of Redis master Sentinel servers monitor. ++ MasterName string ++ ++ // Dial is a user supplied function to connect to Sentinel on given address. This ++ // address will be chosen from Addrs slice. ++ // Note that as per the redis-sentinel client guidelines, a timeout is mandatory ++ // while connecting to Sentinels, and should not be set to 0. ++ Dial func(addr string) (redis.Conn, error) ++ ++ // Pool is a user supplied function returning custom connection pool to Sentinel. ++ // This can be useful to tune options if you are not satisfied with what default ++ // Sentinel pool offers. See defaultPool() method for default pool implementation. ++ // In most cases you only need to provide Dial function and let this be nil. 
++ Pool func(addr string) *redis.Pool ++ ++ mu sync.RWMutex ++ pools map[string]*redis.Pool ++ addr string ++} ++ ++// NoSentinelsAvailable is returned when all sentinels in the list are exhausted ++// (or none configured), and contains the last error returned by Dial (which ++// may be nil) ++type NoSentinelsAvailable struct { ++ lastError error ++} ++ ++func (ns NoSentinelsAvailable) Error() string { ++ if ns.lastError != nil { ++ return fmt.Sprintf("redigo: no sentinels available; last error: %s", ns.lastError.Error()) ++ } ++ return fmt.Sprintf("redigo: no sentinels available") ++} ++ ++// putToTop puts Sentinel address to the top of address list - this means ++// that all next requests will use Sentinel on this address first. ++// ++// From Sentinel guidelines: ++// ++// The first Sentinel replying to the client request should be put at the ++// start of the list, so that at the next reconnection, we'll try first ++// the Sentinel that was reachable in the previous connection attempt, ++// minimizing latency. ++// ++// Lock must be held by caller. ++func (s *Sentinel) putToTop(addr string) { ++ addrs := s.Addrs ++ if addrs[0] == addr { ++ // Already on top. ++ return ++ } ++ newAddrs := []string{addr} ++ for _, a := range addrs { ++ if a == addr { ++ continue ++ } ++ newAddrs = append(newAddrs, a) ++ } ++ s.Addrs = newAddrs ++} ++ ++// putToBottom puts Sentinel address to the bottom of address list. ++// We call this method internally when see that some Sentinel failed to answer ++// on application request so next time we start with another one. ++// ++// Lock must be held by caller. ++func (s *Sentinel) putToBottom(addr string) { ++ addrs := s.Addrs ++ if addrs[len(addrs)-1] == addr { ++ // Already on bottom. 
++ return ++ } ++ newAddrs := []string{} ++ for _, a := range addrs { ++ if a == addr { ++ continue ++ } ++ newAddrs = append(newAddrs, a) ++ } ++ newAddrs = append(newAddrs, addr) ++ s.Addrs = newAddrs ++} ++ ++// defaultPool returns a connection pool to one Sentinel. This allows ++// us to call concurrent requests to Sentinel using connection Do method. ++func (s *Sentinel) defaultPool(addr string) *redis.Pool { ++ return &redis.Pool{ ++ MaxIdle: 3, ++ MaxActive: 10, ++ Wait: true, ++ IdleTimeout: 240 * time.Second, ++ Dial: func() (redis.Conn, error) { ++ return s.Dial(addr) ++ }, ++ TestOnBorrow: func(c redis.Conn, t time.Time) error { ++ _, err := c.Do("PING") ++ return err ++ }, ++ } ++} ++ ++func (s *Sentinel) get(addr string) redis.Conn { ++ pool := s.poolForAddr(addr) ++ return pool.Get() ++} ++ ++func (s *Sentinel) poolForAddr(addr string) *redis.Pool { ++ s.mu.Lock() ++ if s.pools == nil { ++ s.pools = make(map[string]*redis.Pool) ++ } ++ pool, ok := s.pools[addr] ++ if ok { ++ s.mu.Unlock() ++ return pool ++ } ++ s.mu.Unlock() ++ newPool := s.newPool(addr) ++ s.mu.Lock() ++ p, ok := s.pools[addr] ++ if ok { ++ s.mu.Unlock() ++ return p ++ } ++ s.pools[addr] = newPool ++ s.mu.Unlock() ++ return newPool ++} ++ ++func (s *Sentinel) newPool(addr string) *redis.Pool { ++ if s.Pool != nil { ++ return s.Pool(addr) ++ } ++ return s.defaultPool(addr) ++} ++ ++// close connection pool to Sentinel. ++// Lock must be hold by caller. 
++func (s *Sentinel) close() { ++ if s.pools != nil { ++ for _, pool := range s.pools { ++ pool.Close() ++ } ++ } ++ s.pools = nil ++} ++ ++func (s *Sentinel) doUntilSuccess(f func(redis.Conn) (interface{}, error)) (interface{}, error) { ++ s.mu.RLock() ++ addrs := s.Addrs ++ s.mu.RUnlock() ++ ++ var lastErr error ++ ++ for _, addr := range addrs { ++ conn := s.get(addr) ++ reply, err := f(conn) ++ conn.Close() ++ if err != nil { ++ lastErr = err ++ s.mu.Lock() ++ pool, ok := s.pools[addr] ++ if ok { ++ pool.Close() ++ delete(s.pools, addr) ++ } ++ s.putToBottom(addr) ++ s.mu.Unlock() ++ continue ++ } ++ s.putToTop(addr) ++ return reply, nil ++ } ++ ++ return nil, NoSentinelsAvailable{lastError: lastErr} ++} ++ ++// MasterAddr returns an address of current Redis master instance. ++func (s *Sentinel) MasterAddr() (string, error) { ++ res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) { ++ return queryForMaster(c, s.MasterName) ++ }) ++ if err != nil { ++ return "", err ++ } ++ return res.(string), nil ++} ++ ++// SlaveAddrs returns a slice with known slave addresses of current master instance. ++func (s *Sentinel) SlaveAddrs() ([]string, error) { ++ res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) { ++ return queryForSlaveAddrs(c, s.MasterName) ++ }) ++ if err != nil { ++ return nil, err ++ } ++ return res.([]string), nil ++} ++ ++// Slave represents a Redis slave instance which is known by Sentinel. ++type Slave struct { ++ ip string ++ port string ++ flags string ++} ++ ++// Addr returns an address of slave. ++func (s *Slave) Addr() string { ++ return net.JoinHostPort(s.ip, s.port) ++} ++ ++// Available returns if slave is in working state at moment based on information in slave flags. ++func (s *Slave) Available() bool { ++ return !strings.Contains(s.flags, "disconnected") && !strings.Contains(s.flags, "s_down") ++} ++ ++// Slaves returns a slice with known slaves of master instance. 
++func (s *Sentinel) Slaves() ([]*Slave, error) { ++ res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) { ++ return queryForSlaves(c, s.MasterName) ++ }) ++ if err != nil { ++ return nil, err ++ } ++ return res.([]*Slave), nil ++} ++ ++// SentinelAddrs returns a slice of known Sentinel addresses Sentinel server aware of. ++func (s *Sentinel) SentinelAddrs() ([]string, error) { ++ res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) { ++ return queryForSentinels(c, s.MasterName) ++ }) ++ if err != nil { ++ return nil, err ++ } ++ return res.([]string), nil ++} ++ ++// Discover allows to update list of known Sentinel addresses. From docs: ++// ++// A client may update its internal list of Sentinel nodes following this procedure: ++// 1) Obtain a list of other Sentinels for this master using the command SENTINEL sentinels . ++// 2) Add every ip:port pair not already existing in our list at the end of the list. ++func (s *Sentinel) Discover() error { ++ addrs, err := s.SentinelAddrs() ++ if err != nil { ++ return err ++ } ++ s.mu.Lock() ++ for _, addr := range addrs { ++ if !stringInSlice(addr, s.Addrs) { ++ s.Addrs = append(s.Addrs, addr) ++ } ++ } ++ s.mu.Unlock() ++ return nil ++} ++ ++// Close closes current connection to Sentinel. ++func (s *Sentinel) Close() error { ++ s.mu.Lock() ++ s.close() ++ s.mu.Unlock() ++ return nil ++} ++ ++// TestRole wraps GetRole in a test to verify if the role matches an expected ++// role string. If there was any error in querying the supplied connection, ++// the function returns false. Works with Redis >= 2.8.12. ++// It's not goroutine safe, but if you call this method on pooled connections ++// then you are OK. ++func TestRole(c redis.Conn, expectedRole string) bool { ++ role, err := getRole(c) ++ if err != nil || role != expectedRole { ++ return false ++ } ++ return true ++} ++ ++// getRole is a convenience function supplied to query an instance (master or ++// slave) for its role. 
It attempts to use the ROLE command introduced in ++// redis 2.8.12. ++func getRole(c redis.Conn) (string, error) { ++ res, err := c.Do("ROLE") ++ if err != nil { ++ return "", err ++ } ++ rres, ok := res.([]interface{}) ++ if ok { ++ return redis.String(rres[0], nil) ++ } ++ return "", errors.New("redigo: can not transform ROLE reply to string") ++} ++ ++func queryForMaster(conn redis.Conn, masterName string) (string, error) { ++ res, err := redis.Strings(conn.Do("SENTINEL", "get-master-addr-by-name", masterName)) ++ if err != nil { ++ return "", err ++ } ++ if len(res) < 2 { ++ return "", errors.New("redigo: malformed get-master-addr-by-name reply") ++ } ++ masterAddr := net.JoinHostPort(res[0], res[1]) ++ return masterAddr, nil ++} ++ ++func queryForSlaveAddrs(conn redis.Conn, masterName string) ([]string, error) { ++ slaves, err := queryForSlaves(conn, masterName) ++ if err != nil { ++ return nil, err ++ } ++ slaveAddrs := make([]string, 0) ++ for _, slave := range slaves { ++ slaveAddrs = append(slaveAddrs, slave.Addr()) ++ } ++ return slaveAddrs, nil ++} ++ ++func queryForSlaves(conn redis.Conn, masterName string) ([]*Slave, error) { ++ res, err := redis.Values(conn.Do("SENTINEL", "slaves", masterName)) ++ if err != nil { ++ return nil, err ++ } ++ slaves := make([]*Slave, 0) ++ for _, a := range res { ++ sm, err := redis.StringMap(a, err) ++ if err != nil { ++ return slaves, err ++ } ++ slave := &Slave{ ++ ip: sm["ip"], ++ port: sm["port"], ++ flags: sm["flags"], ++ } ++ slaves = append(slaves, slave) ++ } ++ return slaves, nil ++} ++ ++func queryForSentinels(conn redis.Conn, masterName string) ([]string, error) { ++ res, err := redis.Values(conn.Do("SENTINEL", "sentinels", masterName)) ++ if err != nil { ++ return nil, err ++ } ++ sentinels := make([]string, 0) ++ for _, a := range res { ++ sm, err := redis.StringMap(a, err) ++ if err != nil { ++ return sentinels, err ++ } ++ sentinels = append(sentinels, fmt.Sprintf("%s:%s", sm["ip"], sm["port"])) ++ } ++ 
return sentinels, nil ++} ++ ++func stringInSlice(str string, slice []string) bool { ++ for _, s := range slice { ++ if s == str { ++ return true ++ } ++ } ++ return false ++} diff --git a/v2.10.2/harbor-registryctl/rockcraft.yaml b/v2.10.2/harbor-registryctl/rockcraft.yaml new file mode 100644 index 0000000..86133e8 --- /dev/null +++ b/v2.10.2/harbor-registryctl/rockcraft.yaml @@ -0,0 +1,161 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +# Rockcraft definition for Harbor registryctl image: +# docker.io/goharbor/harbor-registryctl:v2.10.2 + +name: harbor-registryctl +summary: Rock containing Harbor Registryctl component. +description: | + Packages the Registryctl service of Harbor. +license: Apache-2.0 + +version: "2.10.2" + +# NOTE(aznashwan): the base for the registryctl image is VMware's Photon, +# but rockcraft only currently supports bare/ubuntu-based bases. +base: ubuntu@22.04 +build-base: ubuntu@22.04 +platforms: + amd64: + arm64: + + +services: + harbor_registryctl: + startup: enabled + override: replace + + # NOTE(aznashwan) set start.sh for compatibility with upstream image. + # All it does is run `./make/photon/common/install_cert.sh` and exec `harbor_registryctl`. + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/registryctl/Dockerfile#L21 + command: /harbor/start.sh + + user: harbor + group: harbor + working-dir: /harbor + + # TODO(aznashwan): original Docker image includes Healthcheck should/can we also? 
+ # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/registryctl/Dockerfile#L17 + + +parts: + create-harbor-user: + plugin: nil + overlay-script: | + groupadd -R $CRAFT_OVERLAY -r -g 10000 harbor + useradd -R $CRAFT_OVERLAY \ + --no-log-init -r -m -g 10000 -u 10000 harbor + + build-deps: + plugin: nil + build-snaps: + # https://github.com/goharbor/harbor/blob/v2.10.2/Makefile#L143 + - go/1.21/stable + build-packages: + - make + + # Sourced from: + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/registryctl/Dockerfile.base + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/registryctl/Dockerfile + image-prep: + after: [create-harbor-user] + plugin: nil + + source-type: git + source: https://github.com/goharbor/harbor + source-tag: v2.10.2 + source-depth: 1 + + override-build: | + set -eux + cd $CRAFT_PART_SRC + + # Copy over auxiliary files: + OUTDIR="$CRAFT_PART_INSTALL/harbor" + mkdir -p "$OUTDIR" + + cp ./make/photon/common/install_cert.sh "$OUTDIR/" + cp ./make/photon/registryctl/start.sh "$OUTDIR/" + + mkdir -p "$CRAFT_PART_INSTALL/etc/pki/tls/certs" + chown -R 10000:10000 "$CRAFT_PART_INSTALL/etc/pki/tls/certs" + chown -R 10000:10000 "$OUTDIR/" + chmod u+x "$OUTDIR/start.sh" + chmod u+x "$OUTDIR/install_cert.sh" + + mkdir -p /etc/registry + + # The registryctl image embeds the `registry` binary as well so its + # build steps have been lifted 1:1 from the `photon-registry` ROCK: + # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/registryctl/Dockerfile#L12 + # https://github.com/canonical/harbor-rocks/pull/4 + build-registry-binary: + after: [create-harbor-user] + build-snaps: + - go/1.21/stable + plugin: go + source-type: git + source: https://github.com/distribution/distribution.git + source-tag: v2.8.3 + override-build: | + git apply --ignore-whitespace $CRAFT_PROJECT_DIR/redis.patch + + # 2.8.3 doesn't have a go.mod definition. + mkdir -p /go/src/github.com/docker + + if [ ! 
-L /go/src/github.com/docker/distribution ]; then + ln -s $(pwd) /go/src/github.com/docker/distribution + fi + + pushd /go/src/github.com/docker/distribution + + export GOPATH=/go + export BUILDTAGS=include_oss include_gcs + export GO111MODULE=auto + export CGO_ENABLED=0 + make clean binaries + + mkdir -p $CRAFT_PART_INSTALL/home/harbor + mkdir -p $CRAFT_PART_INSTALL/usr/bin + + cp bin/registry $CRAFT_PART_INSTALL/usr/bin/registry_DO_NOT_USE_GC + chown 10000:10000 $CRAFT_PART_INSTALL/usr/bin/registry_DO_NOT_USE_GC + + # Sourced from: https://github.com/goharbor/harbor/blob/v2.10.2/Makefile#L352 + build-harbor-registryctl: + after: [create-harbor-user, build-deps, image-prep, build-registry-binary] + # NOTE(aznashwan): Harbor's Makefile relies on building through Docker, + # so we have to run the build commands manually: + plugin: go + + source-type: git + source: https://github.com/goharbor/harbor + source-tag: v2.10.2 + source-depth: 1 + source-subdir: src + + build-environment: + - CGO_ENABLED: 0 + - GOARCH: $CRAFT_ARCH_BUILD_FOR + + override-build: | + set -eux + + # Deduce ldflags: + GIT_TAG="v2.10.2" + GIT_COMMIT_ID=`git -C "$CRAFT_PART_SRC" log --pretty=tformat:"%h" -n1` + + # Build binary: + cd "$CRAFT_PART_SRC/src/registryctl" + go build \ + -ldflags="-w -s -X github.com/goharbor/harbor/src/pkg/version.GitCommit=$GIT_COMMIT_ID -X github.com/goharbor/harbor/src/pkg/version.ReleaseVersion=$GIT_TAG" \ + -o "$CRAFT_PART_BUILD/harbor_registryctl" + + # Copy over binary and set appropriate permissions: + mkdir -p $CRAFT_PART_INSTALL/harbor + cp $CRAFT_PART_BUILD/harbor_registryctl $CRAFT_PART_INSTALL/harbor + + chown 10000:10000 "$CRAFT_PART_INSTALL/harbor/harbor_registryctl" + chmod u+x "$CRAFT_PART_INSTALL/harbor/harbor_registryctl" + From 3c46630d07fc6411391d11ebf72d6fb4c66b6fc3 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Wed, 17 Jul 2024 16:47:40 +0300 Subject: [PATCH 05/19] Hook in testing workflow on pull_requests. 
Signed-off-by: Nashwan Azhari --- .github/workflows/pull_request.yaml | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pull_request.yaml b/.github/workflows/pull_request.yaml index 7cc7788..938707b 100644 --- a/.github/workflows/pull_request.yaml +++ b/.github/workflows/pull_request.yaml @@ -8,7 +8,9 @@ on: jobs: build-and-push-arch-specifics: name: Build Rocks and Push Arch Specific Images - uses: canonical/k8s-workflows/.github/workflows/build_rocks.yaml@main + # HACK(aznashwan): replace with upstream canonical-owned + # workflow once github.com/canonical/k8s-workflows/pull/13 merges. + uses: aznashwan/k8s-workflows/.github/workflows/build_rocks.yaml@main with: owner: ${{ github.repository_owner }} trivy-image-config: "trivy.yaml" @@ -18,6 +20,16 @@ jobs: rockcraft-revisions: '{"amd64": "1783", "arm64": "1784"}' arch-skipping-maximize-build-space: '["arm64"]' platform-labels: '{"arm64": ["Ubuntu_ARM64_4C_16G_01"]}' + + run-tests: + # HACK(aznashwan): replace with upstream canonical-owned + # workflow once github.com/canonical/k8s-workflows/pull/15 merges. 
+ uses: aznashwan/k8s-workflows/.github/workflows/run_tests.yaml@main + needs: [build-and-push-arch-specifics] + secrets: inherit + with: + rock-metas: ${{ needs.build-and-push-arch-specifics.outputs.rock-metas }} + scan-images: uses: canonical/k8s-workflows/.github/workflows/scan_images.yaml@main needs: [build-and-push-arch-specifics] @@ -26,10 +38,11 @@ jobs: upload-result: ${{ github.event_name == 'push' }} images: ${{ needs.build-and-push-arch-specifics.outputs.images }} trivy-image-config: ./trivy.yaml + build-and-push-multiarch-manifest: name: Combine Rocks and Push Multiarch Manifest uses: canonical/k8s-workflows/.github/workflows/assemble_multiarch_image.yaml@main - needs: [build-and-push-arch-specifics] + needs: [build-and-push-arch-specifics, run-tests, scan-images] with: rock-metas: ${{ needs.build-and-push-arch-specifics.outputs.rock-metas }} dry-run: ${{ github.event_name != 'push' }} From f00fdfad6c2a4fb986a41c4be5b480514b50f59b Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Wed, 17 Jul 2024 17:47:16 +0300 Subject: [PATCH 06/19] Adapt existing sanity tests to use testing harness. 
Signed-off-by: Nashwan Azhari --- tests/requirements-test.txt | 2 +- tests/sanity/test_harbor_core_v2_10_2.py | 74 ++++--------- tests/sanity/test_harbor_db_v2_10_2.py | 61 +++------- tests/sanity/test_harbor_exporter_v2_10_2.py | 74 ++++--------- .../sanity/test_harbor_jobservice_v2_10_2.py | 74 ++++--------- .../sanity/test_harbor_registryctl_v2_10_2.py | 104 +++++------------- 6 files changed, 101 insertions(+), 288 deletions(-) diff --git a/tests/requirements-test.txt b/tests/requirements-test.txt index b6895b5..b1ef7fd 100644 --- a/tests/requirements-test.txt +++ b/tests/requirements-test.txt @@ -4,4 +4,4 @@ PyYAML==6.0.1 tenacity==8.2.3 charmed-kubeflow-chisme>=0.4 # TODO(aznashwan): update with upstream Canonical repo when the time comes: -git+https://github.com/petrutlucian94/k8s-test-harness.git@initial +git+https://github.com/aznashwan/k8s-test-harness.git@main diff --git a/tests/sanity/test_harbor_core_v2_10_2.py b/tests/sanity/test_harbor_core_v2_10_2.py index e82f14f..2d53167 100644 --- a/tests/sanity/test_harbor_core_v2_10_2.py +++ b/tests/sanity/test_harbor_core_v2_10_2.py @@ -2,76 +2,40 @@ # See LICENSE file for licensing details. 
import logging -import random import pytest -import string -import subprocess import sys -from charmed_kubeflow_chisme.rock import CheckRock +from k8s_test_harness.util import docker_util +from k8s_test_harness.util import env_util -logger: logging.Logger = logging.getLogger(__name__) -logger.addHandler(logging.FileHandler(f"{__name__}.log")) -logger.addHandler(logging.StreamHandler(sys.stdout)) +LOG: logging.Logger = logging.getLogger(__name__) +LOG.addHandler(logging.FileHandler(f"{__name__}.log")) +LOG.addHandler(logging.StreamHandler(sys.stdout)) -ORIGINAL_IMAGE = "docker.io/goharbor/harbor-core" -@pytest.fixture() -def rock_test_env(tmpdir): - """Yields a temporary directory and random docker container name, then cleans them up after.""" - container_name = "".join( - [str(i) for i in random.choices(string.ascii_lowercase, k=8)] - ) - yield tmpdir, container_name - - try: - subprocess.run(["docker", "rm", container_name]) - except Exception: - pass - # tmpdir fixture we use here should clean up the other files for us - - -def _list_files_in_image_dir( - image: str, container_name: str, root_dir: str="/") -> list[str]: - """Lists all regular file paths under the given dir in the given image.""" - cmd = [ - "docker", - "run", - "--rm", - "--name", - container_name, - image, - "find", - root_dir, - "-type", - "f" - ] - - proc = subprocess.run(cmd, capture_output=True) - return [l.decode('utf8').strip() for l in proc.stdout.splitlines()] +IMAGE_NAME = "harbor-core" +IMAGE_TAG = "v2.10.2" +ORIGINAL_IMAGE = f"docker.io/goharbor/{IMAGE_NAME}:{IMAGE_TAG}" @pytest.mark.abort_on_fail -def test_rock(rock_test_env): - """Test rock.""" - _, container_name = rock_test_env - check_rock = CheckRock("rockcraft.yaml") - rock_image = check_rock.get_name() - rock_version = check_rock.get_version() - LOCAL_ROCK_IMAGE = f"{rock_image}:{rock_version}" - ORIGINAL_ROCK_IMAGE = f"{ORIGINAL_IMAGE}:{rock_version}" +def test_compare_rock_files_to_original(rock_test_env): + """Test ROCK 
contains same fileset as original image.""" + + rock_meta = env_util.get_build_meta_info_for_rock_version( + IMAGE_NAME, IMAGE_TAG, "amd64") + rock_image = rock_meta.image dir_to_check = "/harbor" - original_image_files = _list_files_in_image_dir( - ORIGINAL_ROCK_IMAGE, f"{container_name}-original", - root_dir=dir_to_check) - local_rock_files = _list_files_in_image_dir( - LOCAL_ROCK_IMAGE, container_name, root_dir=dir_to_check) + original_image_files = docker_util.list_files_under_container_image_dir( + ORIGINAL_IMAGE, root_dir=dir_to_check) + rock_image_files = docker_util.list_files_under_container_image_dir( + rock_image, root_dir=dir_to_check) - rock_fileset = set(local_rock_files) + rock_fileset = set(rock_image_files) original_fileset = set(original_image_files) original_extra_files = original_fileset - rock_fileset diff --git a/tests/sanity/test_harbor_db_v2_10_2.py b/tests/sanity/test_harbor_db_v2_10_2.py index 29e29eb..1288457 100644 --- a/tests/sanity/test_harbor_db_v2_10_2.py +++ b/tests/sanity/test_harbor_db_v2_10_2.py @@ -2,59 +2,31 @@ # See LICENSE file for licensing details. 
import logging -import random import pytest -import string -import subprocess import sys -from charmed_kubeflow_chisme.rock import CheckRock +from k8s_test_harness.util import docker_util +from k8s_test_harness.util import env_util -logger: logging.Logger = logging.getLogger(__name__) -logger.addHandler(logging.FileHandler(f"{__name__}.log")) -logger.addHandler(logging.StreamHandler(sys.stdout)) +LOG: logging.Logger = logging.getLogger(__name__) +LOG.addHandler(logging.FileHandler(f"{__name__}.log")) +LOG.addHandler(logging.StreamHandler(sys.stdout)) -@pytest.fixture() -def rock_test_env(tmpdir): - """Yields a temporary directory and random docker container name, then cleans them up after.""" - container_name = "".join( - [str(i) for i in random.choices(string.ascii_lowercase, k=8)] - ) - yield tmpdir, container_name - try: - subprocess.run(["docker", "rm", container_name]) - except Exception: - pass - # tmpdir fixture we use here should clean up the other files for us - - - def _check_file_present_in_image(image: str, path_to_check: str): - """Checks whether a file with the given path is present within an image.""" - subprocess.run( - [ - "docker", - "run", - image, - "exec", - "ls", - "-la", - path_to_check, - ], - check=True, - ) +IMAGE_NAME = "harbor-db" +IMAGE_TAG = "v2.10.2" +ORIGINAL_IMAGE = f"docker.io/goharbor/{IMAGE_NAME}:{IMAGE_TAG}" @pytest.mark.abort_on_fail -def test_rock(rock_test_env): - """Test rock.""" - _, container_name = rock_test_env - check_rock = CheckRock("rockcraft.yaml") - rock_image = check_rock.get_name() - rock_version = check_rock.get_version() - LOCAL_ROCK_IMAGE = f"{rock_image}:{rock_version}" +def test_check_rock_contains_files(rock_test_env): + """Test ROCK contains same fileset as original image.""" + + rock_meta = env_util.get_build_meta_info_for_rock_version( + IMAGE_NAME, IMAGE_TAG, "amd64") + rock_image = rock_meta.image image_files_to_check = [ "/var/lib/postgresql/data", @@ -65,6 +37,5 @@ def test_rock(rock_test_env): 
"/docker-healthcheck.sh", "/docker-entrypoint-initdb.d/initial-registry.sql", ] - - for file in image_files_to_check: - _check_file_present_in_image(LOCAL_ROCK_IMAGE, file) + docker_util.ensure_image_contains_paths( + rock_image, image_files_to_check) diff --git a/tests/sanity/test_harbor_exporter_v2_10_2.py b/tests/sanity/test_harbor_exporter_v2_10_2.py index 2d081a4..d5ee0eb 100644 --- a/tests/sanity/test_harbor_exporter_v2_10_2.py +++ b/tests/sanity/test_harbor_exporter_v2_10_2.py @@ -2,76 +2,40 @@ # See LICENSE file for licensing details. import logging -import random import pytest -import string -import subprocess import sys -from charmed_kubeflow_chisme.rock import CheckRock +from k8s_test_harness.util import docker_util +from k8s_test_harness.util import env_util -logger: logging.Logger = logging.getLogger(__name__) -logger.addHandler(logging.FileHandler(f"{__name__}.log")) -logger.addHandler(logging.StreamHandler(sys.stdout)) +LOG: logging.Logger = logging.getLogger(__name__) +LOG.addHandler(logging.FileHandler(f"{__name__}.log")) +LOG.addHandler(logging.StreamHandler(sys.stdout)) -ORIGINAL_IMAGE = "docker.io/goharbor/harbor-exporter" -@pytest.fixture() -def rock_test_env(tmpdir): - """Yields a temporary directory and random docker container name, then cleans them up after.""" - container_name = "".join( - [str(i) for i in random.choices(string.ascii_lowercase, k=8)] - ) - yield tmpdir, container_name - - try: - subprocess.run(["docker", "rm", container_name]) - except Exception: - pass - # tmpdir fixture we use here should clean up the other files for us - - -def _list_files_in_image_dir( - image: str, container_name: str, root_dir: str="/") -> list[str]: - """Lists all regular file paths under the given dir in the given image.""" - cmd = [ - "docker", - "run", - "--rm", - "--name", - container_name, - image, - "find", - root_dir, - "-type", - "f" - ] - - proc = subprocess.run(cmd, capture_output=True) - return [l.decode('utf8').strip() for l in 
proc.stdout.splitlines()] +IMAGE_NAME = "harbor-exporter" +IMAGE_TAG = "v2.10.2" +ORIGINAL_IMAGE = f"docker.io/goharbor/{IMAGE_NAME}:{IMAGE_TAG}" @pytest.mark.abort_on_fail -def test_rock(rock_test_env): - """Test rock.""" - _, container_name = rock_test_env - check_rock = CheckRock("rockcraft.yaml") - rock_image = check_rock.get_name() - rock_version = check_rock.get_version() - LOCAL_ROCK_IMAGE = f"{rock_image}:{rock_version}" - ORIGINAL_ROCK_IMAGE = f"{ORIGINAL_IMAGE}:{rock_version}" +def test_compare_rock_files_to_original(rock_test_env): + """Test ROCK contains same fileset as original image.""" + + rock_meta = env_util.get_build_meta_info_for_rock_version( + IMAGE_NAME, IMAGE_TAG, "amd64") + rock_image = rock_meta.image dir_to_check = "/harbor" - original_image_files = _list_files_in_image_dir( - ORIGINAL_ROCK_IMAGE, f"{container_name}-original", - root_dir=dir_to_check) - local_rock_files = _list_files_in_image_dir( - LOCAL_ROCK_IMAGE, container_name, root_dir=dir_to_check) + original_image_files = docker_util.list_files_under_container_image_dir( + ORIGINAL_IMAGE, root_dir=dir_to_check) + rock_image_files = docker_util.list_files_under_container_image_dir( + rock_image, root_dir=dir_to_check) - rock_fileset = set(local_rock_files) + rock_fileset = set(rock_image_files) original_fileset = set(original_image_files) original_extra_files = original_fileset - rock_fileset diff --git a/tests/sanity/test_harbor_jobservice_v2_10_2.py b/tests/sanity/test_harbor_jobservice_v2_10_2.py index d777d37..6c288f4 100644 --- a/tests/sanity/test_harbor_jobservice_v2_10_2.py +++ b/tests/sanity/test_harbor_jobservice_v2_10_2.py @@ -2,76 +2,40 @@ # See LICENSE file for licensing details. 
import logging -import random import pytest -import string -import subprocess import sys -from charmed_kubeflow_chisme.rock import CheckRock +from k8s_test_harness.util import docker_util +from k8s_test_harness.util import env_util -logger: logging.Logger = logging.getLogger(__name__) -logger.addHandler(logging.FileHandler(f"{__name__}.log")) -logger.addHandler(logging.StreamHandler(sys.stdout)) +LOG: logging.Logger = logging.getLogger(__name__) +LOG.addHandler(logging.FileHandler(f"{__name__}.log")) +LOG.addHandler(logging.StreamHandler(sys.stdout)) -ORIGINAL_IMAGE = "docker.io/goharbor/harbor-jobservice" -@pytest.fixture() -def rock_test_env(tmpdir): - """Yields a temporary directory and random docker container name, then cleans them up after.""" - container_name = "".join( - [str(i) for i in random.choices(string.ascii_lowercase, k=8)] - ) - yield tmpdir, container_name - - try: - subprocess.run(["docker", "rm", container_name]) - except Exception: - pass - # tmpdir fixture we use here should clean up the other files for us - - -def _list_files_in_image_dir( - image: str, container_name: str, root_dir: str="/") -> list[str]: - """Lists all regular file paths under the given dir in the given image.""" - cmd = [ - "docker", - "run", - "--rm", - "--name", - container_name, - image, - "find", - root_dir, - "-type", - "f" - ] - - proc = subprocess.run(cmd, capture_output=True) - return [l.decode('utf8').strip() for l in proc.stdout.splitlines()] +IMAGE_NAME = "harbor-jobservice" +IMAGE_TAG = "v2.10.2" +ORIGINAL_IMAGE = f"docker.io/goharbor/{IMAGE_NAME}:{IMAGE_TAG}" @pytest.mark.abort_on_fail -def test_rock(rock_test_env): - """Test rock.""" - _, container_name = rock_test_env - check_rock = CheckRock("rockcraft.yaml") - rock_image = check_rock.get_name() - rock_version = check_rock.get_version() - LOCAL_ROCK_IMAGE = f"{rock_image}:{rock_version}" - ORIGINAL_ROCK_IMAGE = f"{ORIGINAL_IMAGE}:{rock_version}" +def test_compare_rock_files_to_original(rock_test_env): + 
"""Test ROCK contains same fileset as original image.""" + + rock_meta = env_util.get_build_meta_info_for_rock_version( + IMAGE_NAME, IMAGE_TAG, "amd64") + rock_image = rock_meta.image dir_to_check = "/harbor" - original_image_files = _list_files_in_image_dir( - ORIGINAL_ROCK_IMAGE, f"{container_name}-original", - root_dir=dir_to_check) - local_rock_files = _list_files_in_image_dir( - LOCAL_ROCK_IMAGE, container_name, root_dir=dir_to_check) + original_image_files = docker_util.list_files_under_container_image_dir( + ORIGINAL_IMAGE, root_dir=dir_to_check) + rock_image_files = docker_util.list_files_under_container_image_dir( + rock_image, root_dir=dir_to_check) - rock_fileset = set(local_rock_files) + rock_fileset = set(rock_image_files) original_fileset = set(original_image_files) original_extra_files = original_fileset - rock_fileset diff --git a/tests/sanity/test_harbor_registryctl_v2_10_2.py b/tests/sanity/test_harbor_registryctl_v2_10_2.py index 5df6ba2..3aa8cbc 100644 --- a/tests/sanity/test_harbor_registryctl_v2_10_2.py +++ b/tests/sanity/test_harbor_registryctl_v2_10_2.py @@ -2,91 +2,40 @@ # See LICENSE file for licensing details. 
import logging -import random import pytest -import string -import subprocess import sys -from charmed_kubeflow_chisme.rock import CheckRock - -logger: logging.Logger = logging.getLogger(__name__) - -logger.addHandler(logging.FileHandler(f"{__name__}.log")) -logger.addHandler(logging.StreamHandler(sys.stdout)) - - -ORIGINAL_IMAGE = "docker.io/goharbor/harbor-registryctl" - -@pytest.fixture() -def rock_test_env(tmpdir): - """Yields a temporary directory and random docker container name, then cleans them up after.""" - container_name = "".join( - [str(i) for i in random.choices(string.ascii_lowercase, k=8)] - ) - yield tmpdir, container_name - - try: - subprocess.run(["docker", "rm", container_name]) - except Exception: - pass - # tmpdir fixture we use here should clean up the other files for us - - -def _list_files_in_image_dir( - image: str, container_name: str, root_dir: str="/") -> list[str]: - """Lists all regular file paths under the given dir in the given image.""" - cmd = [ - "docker", - "run", - "--rm", - "--name", - container_name, - image, - "find", - root_dir, - "-type", - "f" - ] - - proc = subprocess.run(cmd, capture_output=True) - return [l.decode('utf8').strip() for l in proc.stdout.splitlines()] - - -def _check_file_present_in_image(image: str, path_to_check: str): - """Checks whether a file with the given path is present within an image.""" - subprocess.run( - [ - "docker", - "run", - image, - "exec", - "ls", - "-la", - path_to_check, - ], - check=True, - ) +from k8s_test_harness.util import docker_util +from k8s_test_harness.util import env_util + + +LOG: logging.Logger = logging.getLogger(__name__) + +LOG.addHandler(logging.FileHandler(f"{__name__}.log")) +LOG.addHandler(logging.StreamHandler(sys.stdout)) + + +IMAGE_NAME = "harbor-registryctl" +IMAGE_TAG = "v2.10.2" +ORIGINAL_IMAGE = f"docker.io/goharbor/{IMAGE_NAME}:{IMAGE_TAG}" + @pytest.mark.abort_on_fail -def test_rock(rock_test_env): - """Test rock.""" - _, container_name = rock_test_env - 
check_rock = CheckRock("rockcraft.yaml") - rock_image = check_rock.get_name() - rock_version = check_rock.get_version() - LOCAL_ROCK_IMAGE = f"{rock_image}:{rock_version}" - ORIGINAL_ROCK_IMAGE = f"{ORIGINAL_IMAGE}:{rock_version}" +def test_compare_rock_files_to_original(rock_test_env): + """Test ROCK contains same fileset as original image.""" + + rock_meta = env_util.get_build_meta_info_for_rock_version( + IMAGE_NAME, IMAGE_TAG, "amd64") + rock_image = rock_meta.image dir_to_check = "/harbor" - original_image_files = _list_files_in_image_dir( - ORIGINAL_ROCK_IMAGE, f"{container_name}-original", - root_dir=dir_to_check) - local_rock_files = _list_files_in_image_dir( - LOCAL_ROCK_IMAGE, container_name, root_dir=dir_to_check) + original_image_files = docker_util.list_files_under_container_image_dir( + ORIGINAL_IMAGE, root_dir=dir_to_check) + rock_image_files = docker_util.list_files_under_container_image_dir( + rock_image, root_dir=dir_to_check) - rock_fileset = set(local_rock_files) + rock_fileset = set(rock_image_files) original_fileset = set(original_image_files) original_extra_files = original_fileset - rock_fileset @@ -103,4 +52,5 @@ def test_rock(rock_test_env): # NOTE(aznashwan): the registryctl image also embeds a `registry` binary: # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/registryctl/Dockerfile#L6 - _check_file_present_in_image(LOCAL_ROCK_IMAGE, "/usr/bin/registry_DO_NOT_USE_GC") + docker_util.ensure_image_contains_paths( + rock_image, ["/usr/bin/registry_DO_NOT_USE_GC"]) From 82a8c1f8fd755cb4a2683e0298fea1efc2d75253 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Wed, 17 Jul 2024 17:55:11 +0300 Subject: [PATCH 07/19] Move portal rockfile to match original image naming scheme. 
Signed-off-by: Nashwan Azhari --- v2.10.2/{portal => harbor-portal}/rockcraft.yaml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename v2.10.2/{portal => harbor-portal}/rockcraft.yaml (100%) diff --git a/v2.10.2/portal/rockcraft.yaml b/v2.10.2/harbor-portal/rockcraft.yaml similarity index 100% rename from v2.10.2/portal/rockcraft.yaml rename to v2.10.2/harbor-portal/rockcraft.yaml From 2d60d6c88b88cdbebada07b58fd049792f269285 Mon Sep 17 00:00:00 2001 From: Lucian Petrut Date: Wed, 17 Jul 2024 14:34:37 +0000 Subject: [PATCH 08/19] portal: apply cypress build workaround On Ubuntu 22.04 with snap installed nodejs, one of the portal dependencies fails to install: https://github.com/cypress-io/cypress-documentation/issues/5868 We'll apply the recommended workaround. --- v2.10.2/harbor-portal/rockcraft.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/v2.10.2/harbor-portal/rockcraft.yaml b/v2.10.2/harbor-portal/rockcraft.yaml index 87cc99b..b84efb4 100644 --- a/v2.10.2/harbor-portal/rockcraft.yaml +++ b/v2.10.2/harbor-portal/rockcraft.yaml @@ -71,8 +71,8 @@ parts: export NPM_CONFIG_REGISTRY=https://registry.npmjs.org - export PATH="$PATH:$CRAFT_PART_BUILD/src/portal/node_modules/.bin" - npm install ng-swagger-gen + # https://github.com/cypress-io/cypress-documentation/issues/5868 + npm install cypress --save-dev --foreground-scripts npm install --unsafe-perm npm run generate-build-timestamp From 773d629fd5116155d38934ccab2749726d60865e Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Wed, 17 Jul 2024 18:16:54 +0300 Subject: [PATCH 09/19] Ensure ROCK names concide with upstream image names. 
Signed-off-by: Nashwan Azhari --- v2.10.2/harbor-portal/rockcraft.yaml | 2 +- v2.10.2/nginx-photon/rockcraft.yaml | 2 +- v2.10.2/redis-photon/rockcraft.yaml | 2 +- v2.10.2/registry-photon/rockcraft.yaml | 2 +- v2.10.2/trivy-adapter-photon/rockcraft.yaml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/v2.10.2/harbor-portal/rockcraft.yaml b/v2.10.2/harbor-portal/rockcraft.yaml index b84efb4..bc7cca4 100644 --- a/v2.10.2/harbor-portal/rockcraft.yaml +++ b/v2.10.2/harbor-portal/rockcraft.yaml @@ -1,4 +1,4 @@ -name: portal +name: harbor-portal summary: Rock replacement for the Harbor Portal image. description: > This rock is a drop in replacement for the diff --git a/v2.10.2/nginx-photon/rockcraft.yaml b/v2.10.2/nginx-photon/rockcraft.yaml index 3e64a61..982d165 100644 --- a/v2.10.2/nginx-photon/rockcraft.yaml +++ b/v2.10.2/nginx-photon/rockcraft.yaml @@ -1,4 +1,4 @@ -name: nginx +name: nginx-photon summary: Rock replacement for the Harbor Nginx image. description: > This rock is a drop in replacement for the diff --git a/v2.10.2/redis-photon/rockcraft.yaml b/v2.10.2/redis-photon/rockcraft.yaml index 56fa402..f1a2a5b 100644 --- a/v2.10.2/redis-photon/rockcraft.yaml +++ b/v2.10.2/redis-photon/rockcraft.yaml @@ -1,4 +1,4 @@ -name: redis +name: redis-photon summary: Rock replacement for the Harbor Redis image. description: > This rock is a drop in replacement for the diff --git a/v2.10.2/registry-photon/rockcraft.yaml b/v2.10.2/registry-photon/rockcraft.yaml index a01c3c7..3d8d678 100644 --- a/v2.10.2/registry-photon/rockcraft.yaml +++ b/v2.10.2/registry-photon/rockcraft.yaml @@ -1,4 +1,4 @@ -name: registry +name: registry-photon summary: Rock replacement for the Harbor registry image. 
description: > This rock is a drop in replacement for the diff --git a/v2.10.2/trivy-adapter-photon/rockcraft.yaml b/v2.10.2/trivy-adapter-photon/rockcraft.yaml index d7db7a2..f2d5cc2 100644 --- a/v2.10.2/trivy-adapter-photon/rockcraft.yaml +++ b/v2.10.2/trivy-adapter-photon/rockcraft.yaml @@ -1,4 +1,4 @@ -name: trivy-adapter +name: trivy-adapter-photon summary: Rock replacement for the Harbor Trivy adapter image. description: > This rock is a drop in replacement for the From 027516af8fc57a1a23362a616668f6cef8a9086d Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Wed, 17 Jul 2024 18:37:25 +0300 Subject: [PATCH 10/19] Remove test failure limit from pytest. Signed-off-by: Nashwan Azhari --- tests/tox.ini | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/tox.ini b/tests/tox.ini index 22a9094..7e41dba 100644 --- a/tests/tox.ini +++ b/tests/tox.ini @@ -45,7 +45,6 @@ deps = -r {tox_root}/requirements-test.txt commands = pytest -v \ - --maxfail 1 \ --tb native \ --log-cli-level DEBUG \ --disable-warnings \ @@ -61,7 +60,6 @@ deps = -r {tox_root}/requirements-test.txt commands = pytest -v \ - --maxfail 1 \ --tb native \ --log-cli-level DEBUG \ --disable-warnings \ From edc9c79fdd0a2e1c13c6f16a709d37a62029adf4 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Wed, 17 Jul 2024 19:07:35 +0300 Subject: [PATCH 11/19] Add sanity tests for non-core Harbor ROCKs. 
Signed-off-by: Nashwan Azhari --- tests/sanity/test_harbor_portal_v2_10_2.py | 75 +++++++++++++++++++ tests/sanity/test_nginx_photon_v2_10_2.py | 35 +++++++++ tests/sanity/test_redis_photon_v2_10_2.py | 38 ++++++++++ tests/sanity/test_registry_photon_v2_10_2.py | 58 ++++++++++++++ .../test_trivy_adapter_photon_v2_10_2.py | 58 ++++++++++++++ 5 files changed, 264 insertions(+) create mode 100644 tests/sanity/test_harbor_portal_v2_10_2.py create mode 100644 tests/sanity/test_nginx_photon_v2_10_2.py create mode 100644 tests/sanity/test_redis_photon_v2_10_2.py create mode 100644 tests/sanity/test_registry_photon_v2_10_2.py create mode 100644 tests/sanity/test_trivy_adapter_photon_v2_10_2.py diff --git a/tests/sanity/test_harbor_portal_v2_10_2.py b/tests/sanity/test_harbor_portal_v2_10_2.py new file mode 100644 index 0000000..90b7dc5 --- /dev/null +++ b/tests/sanity/test_harbor_portal_v2_10_2.py @@ -0,0 +1,75 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +import logging +import pytest +import sys + +from k8s_test_harness.util import docker_util +from k8s_test_harness.util import env_util + + +LOG: logging.Logger = logging.getLogger(__name__) + +LOG.addHandler(logging.FileHandler(f"{__name__}.log")) +LOG.addHandler(logging.StreamHandler(sys.stdout)) + + +IMAGE_NAME = "harbor-portal" +IMAGE_TAG = "v2.10.2" +ORIGINAL_IMAGE = f"docker.io/goharbor/{IMAGE_NAME}:{IMAGE_TAG}" + + +@pytest.mark.abort_on_fail +def test_check_rock_contains_files(rock_test_env): + """Test ROCK contains expected files""" + + rock_meta = env_util.get_build_meta_info_for_rock_version( + IMAGE_NAME, IMAGE_TAG, "amd64") + rock_image = rock_meta.image + + image_files_to_check = [ + # Nginx-related dirs: + "/home/nginx", + "/var/log/nginx", + ] + docker_util.ensure_image_contains_paths( + rock_image, image_files_to_check) + +@pytest.mark.abort_on_fail +def test_compare_rock_files_to_original(rock_test_env): + """Test ROCK contains same fileset as original image.""" + + 
rock_meta = env_util.get_build_meta_info_for_rock_version( + IMAGE_NAME, IMAGE_TAG, "amd64") + rock_image = rock_meta.image + + dir_to_check = "/usr/share/nginx/html" + + original_image_files = docker_util.list_files_under_container_image_dir( + ORIGINAL_IMAGE, root_dir=dir_to_check) + rock_image_files = docker_util.list_files_under_container_image_dir( + rock_image, root_dir=dir_to_check) + + rock_fileset = set(rock_image_files) + original_fileset = set(original_image_files) + + original_extra_files = original_fileset - rock_fileset + if original_extra_files: + pytest.fail( + f"Missing some files from the original image: " + f"{original_extra_files}") + + rock_extra_files = rock_fileset - original_fileset + if rock_extra_files: + pytest.fail( + f"Rock has extra files not present in original image: " + f"{rock_extra_files}") + + # Nginx-related dirs: + image_files_to_check = [ + "/home/nginx", + "/var/log/nginx", + ] + docker_util.ensure_image_contains_paths( + rock_image, image_files_to_check) diff --git a/tests/sanity/test_nginx_photon_v2_10_2.py b/tests/sanity/test_nginx_photon_v2_10_2.py new file mode 100644 index 0000000..c754006 --- /dev/null +++ b/tests/sanity/test_nginx_photon_v2_10_2.py @@ -0,0 +1,35 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +import logging +import pytest +import sys + +from k8s_test_harness.util import docker_util +from k8s_test_harness.util import env_util + + +LOG: logging.Logger = logging.getLogger(__name__) + +LOG.addHandler(logging.FileHandler(f"{__name__}.log")) +LOG.addHandler(logging.StreamHandler(sys.stdout)) + +IMAGE_NAME = "nginx-photon" +IMAGE_TAG = "v2.10.2" +ORIGINAL_IMAGE = f"docker.io/goharbor/{IMAGE_NAME}:{IMAGE_TAG}" + + +@pytest.mark.abort_on_fail +def test_check_rock_contains_files(rock_test_env): + """Test ROCK contains same fileset as original image.""" + + rock_meta = env_util.get_build_meta_info_for_rock_version( + IMAGE_NAME, IMAGE_TAG, "amd64") + rock_image = rock_meta.image + + image_files_to_check = [ + "/home/nginx", + "/var/log/nginx" + ] + docker_util.ensure_image_contains_paths( + rock_image, image_files_to_check) diff --git a/tests/sanity/test_redis_photon_v2_10_2.py b/tests/sanity/test_redis_photon_v2_10_2.py new file mode 100644 index 0000000..54be9e3 --- /dev/null +++ b/tests/sanity/test_redis_photon_v2_10_2.py @@ -0,0 +1,38 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +import logging +import pytest +import sys + +from k8s_test_harness.util import docker_util +from k8s_test_harness.util import env_util + + +LOG: logging.Logger = logging.getLogger(__name__) + +LOG.addHandler(logging.FileHandler(f"{__name__}.log")) +LOG.addHandler(logging.StreamHandler(sys.stdout)) + + +IMAGE_NAME = "redis-photon" +IMAGE_TAG = "v2.10.2" +ORIGINAL_IMAGE = f"docker.io/goharbor/{IMAGE_NAME}:{IMAGE_TAG}" + + +@pytest.mark.abort_on_fail +def test_rock_contains_files(rock_test_env): + """Test ROCK contains same fileset as original image.""" + + rock_meta = env_util.get_build_meta_info_for_rock_version( + IMAGE_NAME, IMAGE_TAG, "amd64") + rock_image = rock_meta.image + + paths_to_check = [ + "/usr/bin/docker-healthcheck", + "/etc/redis.conf", + "/var/lib/redis", + ] + docker_util.ensure_image_contains_paths( + rock_image, paths_to_check) + diff --git a/tests/sanity/test_registry_photon_v2_10_2.py b/tests/sanity/test_registry_photon_v2_10_2.py new file mode 100644 index 0000000..653133b --- /dev/null +++ b/tests/sanity/test_registry_photon_v2_10_2.py @@ -0,0 +1,58 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +import logging +import pytest +import sys + +from k8s_test_harness.util import docker_util +from k8s_test_harness.util import env_util + + +LOG: logging.Logger = logging.getLogger(__name__) + +LOG.addHandler(logging.FileHandler(f"{__name__}.log")) +LOG.addHandler(logging.StreamHandler(sys.stdout)) + + +IMAGE_NAME = "registry-photon" +IMAGE_TAG = "v2.10.2" +ORIGINAL_IMAGE = f"docker.io/goharbor/{IMAGE_NAME}:{IMAGE_TAG}" + + +@pytest.mark.abort_on_fail +def test_compare_rock_files_to_original(rock_test_env): + """Test ROCK contains same fileset as original image.""" + + rock_meta = env_util.get_build_meta_info_for_rock_version( + IMAGE_NAME, IMAGE_TAG, "amd64") + rock_image = rock_meta.image + + dir_to_check = "/home/harbor" + + original_image_files = docker_util.list_files_under_container_image_dir( + ORIGINAL_IMAGE, root_dir=dir_to_check) + rock_image_files = docker_util.list_files_under_container_image_dir( + rock_image, root_dir=dir_to_check) + + rock_fileset = set(rock_image_files) + original_fileset = set(original_image_files) + + original_extra_files = original_fileset - rock_fileset + if original_extra_files: + pytest.fail( + f"Missing some files from the original image: " + f"{original_extra_files}") + + rock_extra_files = rock_fileset - original_fileset + if rock_extra_files: + pytest.fail( + f"Rock has extra files not present in original image: " + f"{rock_extra_files}") + + paths_to_check = [ + "/usr/bin/registry_DO_NOT_USE_GC", + "/etc/pki/tls/certs", + ] + docker_util.ensure_image_contains_paths( + rock_image, paths_to_check) diff --git a/tests/sanity/test_trivy_adapter_photon_v2_10_2.py b/tests/sanity/test_trivy_adapter_photon_v2_10_2.py new file mode 100644 index 0000000..feffb3c --- /dev/null +++ b/tests/sanity/test_trivy_adapter_photon_v2_10_2.py @@ -0,0 +1,58 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +import logging +import pytest +import sys + +from k8s_test_harness.util import docker_util +from k8s_test_harness.util import env_util + + +LOG: logging.Logger = logging.getLogger(__name__) + +LOG.addHandler(logging.FileHandler(f"{__name__}.log")) +LOG.addHandler(logging.StreamHandler(sys.stdout)) + + +IMAGE_NAME = "trivy-adapter-photon" +IMAGE_TAG = "v2.10.2" +ORIGINAL_IMAGE = f"docker.io/goharbor/{IMAGE_NAME}:{IMAGE_TAG}" + + +@pytest.mark.abort_on_fail +def test_compare_rock_files_to_original(rock_test_env): + """Test ROCK contains same fileset as original image.""" + + rock_meta = env_util.get_build_meta_info_for_rock_version( + IMAGE_NAME, IMAGE_TAG, "amd64") + rock_image = rock_meta.image + + dir_to_check = "/home/scanner" + + original_image_files = docker_util.list_files_under_container_image_dir( + ORIGINAL_IMAGE, root_dir=dir_to_check) + rock_image_files = docker_util.list_files_under_container_image_dir( + rock_image, root_dir=dir_to_check) + + rock_fileset = set(rock_image_files) + original_fileset = set(original_image_files) + + original_extra_files = original_fileset - rock_fileset + if original_extra_files: + pytest.fail( + f"Missing some files from the original image: " + f"{original_extra_files}") + + rock_extra_files = rock_fileset - original_fileset + if rock_extra_files: + pytest.fail( + f"Rock has extra files not present in original image: " + f"{rock_extra_files}") + + paths_to_check = [ + "/etc/pki/tls/certs", + ] + docker_util.ensure_image_contains_paths( + rock_image, paths_to_check) + From ba257ca71d0763ff403df57773a52434a3e0c0cf Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Thu, 18 Jul 2024 00:13:07 +0300 Subject: [PATCH 12/19] Remove residual testing fixture. 
Signed-off-by: Nashwan Azhari --- tests/sanity/test_harbor_core_v2_10_2.py | 2 +- tests/sanity/test_harbor_db_v2_10_2.py | 2 +- tests/sanity/test_harbor_exporter_v2_10_2.py | 2 +- tests/sanity/test_harbor_jobservice_v2_10_2.py | 2 +- tests/sanity/test_harbor_portal_v2_10_2.py | 4 ++-- tests/sanity/test_harbor_registryctl_v2_10_2.py | 2 +- tests/sanity/test_nginx_photon_v2_10_2.py | 2 +- tests/sanity/test_redis_photon_v2_10_2.py | 2 +- tests/sanity/test_registry_photon_v2_10_2.py | 2 +- tests/sanity/test_trivy_adapter_photon_v2_10_2.py | 2 +- 10 files changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/sanity/test_harbor_core_v2_10_2.py b/tests/sanity/test_harbor_core_v2_10_2.py index 2d53167..479ba8a 100644 --- a/tests/sanity/test_harbor_core_v2_10_2.py +++ b/tests/sanity/test_harbor_core_v2_10_2.py @@ -21,7 +21,7 @@ @pytest.mark.abort_on_fail -def test_compare_rock_files_to_original(rock_test_env): +def test_compare_rock_files_to_original(): """Test ROCK contains same fileset as original image.""" rock_meta = env_util.get_build_meta_info_for_rock_version( diff --git a/tests/sanity/test_harbor_db_v2_10_2.py b/tests/sanity/test_harbor_db_v2_10_2.py index 1288457..e3c3300 100644 --- a/tests/sanity/test_harbor_db_v2_10_2.py +++ b/tests/sanity/test_harbor_db_v2_10_2.py @@ -21,7 +21,7 @@ @pytest.mark.abort_on_fail -def test_check_rock_contains_files(rock_test_env): +def test_check_rock_contains_files(): """Test ROCK contains same fileset as original image.""" rock_meta = env_util.get_build_meta_info_for_rock_version( diff --git a/tests/sanity/test_harbor_exporter_v2_10_2.py b/tests/sanity/test_harbor_exporter_v2_10_2.py index d5ee0eb..3b5f181 100644 --- a/tests/sanity/test_harbor_exporter_v2_10_2.py +++ b/tests/sanity/test_harbor_exporter_v2_10_2.py @@ -21,7 +21,7 @@ @pytest.mark.abort_on_fail -def test_compare_rock_files_to_original(rock_test_env): +def test_compare_rock_files_to_original(): """Test ROCK contains same fileset as original image.""" rock_meta = 
env_util.get_build_meta_info_for_rock_version( diff --git a/tests/sanity/test_harbor_jobservice_v2_10_2.py b/tests/sanity/test_harbor_jobservice_v2_10_2.py index 6c288f4..cda397c 100644 --- a/tests/sanity/test_harbor_jobservice_v2_10_2.py +++ b/tests/sanity/test_harbor_jobservice_v2_10_2.py @@ -21,7 +21,7 @@ @pytest.mark.abort_on_fail -def test_compare_rock_files_to_original(rock_test_env): +def test_compare_rock_files_to_original(): """Test ROCK contains same fileset as original image.""" rock_meta = env_util.get_build_meta_info_for_rock_version( diff --git a/tests/sanity/test_harbor_portal_v2_10_2.py b/tests/sanity/test_harbor_portal_v2_10_2.py index 90b7dc5..1a1ed3c 100644 --- a/tests/sanity/test_harbor_portal_v2_10_2.py +++ b/tests/sanity/test_harbor_portal_v2_10_2.py @@ -21,7 +21,7 @@ @pytest.mark.abort_on_fail -def test_check_rock_contains_files(rock_test_env): +def test_check_rock_contains_files(): """Test ROCK contains expected files""" rock_meta = env_util.get_build_meta_info_for_rock_version( @@ -37,7 +37,7 @@ def test_check_rock_contains_files(rock_test_env): rock_image, image_files_to_check) @pytest.mark.abort_on_fail -def test_compare_rock_files_to_original(rock_test_env): +def test_compare_rock_files_to_original(): """Test ROCK contains same fileset as original image.""" rock_meta = env_util.get_build_meta_info_for_rock_version( diff --git a/tests/sanity/test_harbor_registryctl_v2_10_2.py b/tests/sanity/test_harbor_registryctl_v2_10_2.py index 3aa8cbc..9753f0b 100644 --- a/tests/sanity/test_harbor_registryctl_v2_10_2.py +++ b/tests/sanity/test_harbor_registryctl_v2_10_2.py @@ -21,7 +21,7 @@ @pytest.mark.abort_on_fail -def test_compare_rock_files_to_original(rock_test_env): +def test_compare_rock_files_to_original(): """Test ROCK contains same fileset as original image.""" rock_meta = env_util.get_build_meta_info_for_rock_version( diff --git a/tests/sanity/test_nginx_photon_v2_10_2.py b/tests/sanity/test_nginx_photon_v2_10_2.py index c754006..587b0b7 
100644 --- a/tests/sanity/test_nginx_photon_v2_10_2.py +++ b/tests/sanity/test_nginx_photon_v2_10_2.py @@ -20,7 +20,7 @@ @pytest.mark.abort_on_fail -def test_check_rock_contains_files(rock_test_env): +def test_check_rock_contains_files(): """Test ROCK contains same fileset as original image.""" rock_meta = env_util.get_build_meta_info_for_rock_version( diff --git a/tests/sanity/test_redis_photon_v2_10_2.py b/tests/sanity/test_redis_photon_v2_10_2.py index 54be9e3..54b84b2 100644 --- a/tests/sanity/test_redis_photon_v2_10_2.py +++ b/tests/sanity/test_redis_photon_v2_10_2.py @@ -21,7 +21,7 @@ @pytest.mark.abort_on_fail -def test_rock_contains_files(rock_test_env): +def test_rock_contains_files(): """Test ROCK contains same fileset as original image.""" rock_meta = env_util.get_build_meta_info_for_rock_version( diff --git a/tests/sanity/test_registry_photon_v2_10_2.py b/tests/sanity/test_registry_photon_v2_10_2.py index 653133b..71b3972 100644 --- a/tests/sanity/test_registry_photon_v2_10_2.py +++ b/tests/sanity/test_registry_photon_v2_10_2.py @@ -21,7 +21,7 @@ @pytest.mark.abort_on_fail -def test_compare_rock_files_to_original(rock_test_env): +def test_compare_rock_files_to_original(): """Test ROCK contains same fileset as original image.""" rock_meta = env_util.get_build_meta_info_for_rock_version( diff --git a/tests/sanity/test_trivy_adapter_photon_v2_10_2.py b/tests/sanity/test_trivy_adapter_photon_v2_10_2.py index feffb3c..35db4d2 100644 --- a/tests/sanity/test_trivy_adapter_photon_v2_10_2.py +++ b/tests/sanity/test_trivy_adapter_photon_v2_10_2.py @@ -21,7 +21,7 @@ @pytest.mark.abort_on_fail -def test_compare_rock_files_to_original(rock_test_env): +def test_compare_rock_files_to_original(): """Test ROCK contains same fileset as original image.""" rock_meta = env_util.get_build_meta_info_for_rock_version( From ef8cbcc60160623cb05d5fdef8088c0a0fdb24e7 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Thu, 18 Jul 2024 00:48:35 +0300 Subject: [PATCH 13/19] Ensure 
BUILT_ROCKS_METADATA gets passed to tests. Signed-off-by: Nashwan Azhari --- tests/tox.ini | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/tox.ini b/tests/tox.ini index 7e41dba..bd66611 100644 --- a/tests/tox.ini +++ b/tests/tox.ini @@ -51,6 +51,7 @@ commands = {posargs} \ {tox_root}/sanity pass_env = + BUILT_ROCKS_METADATA TEST_* ROCK_* @@ -66,6 +67,7 @@ commands = {posargs} \ {tox_root}/integration pass_env = + BUILT_ROCKS_METADATA TEST_* ROCK_* From 8728fd93d009fb2de9c9d4e2eaaccf7d1b80d765 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Thu, 18 Jul 2024 13:21:16 +0300 Subject: [PATCH 14/19] Update all rockcraft version tags to match originals Signed-off-by: Nashwan Azhari --- v2.10.2/harbor-core/rockcraft.yaml | 2 +- v2.10.2/harbor-db/rockcraft.yaml | 2 +- v2.10.2/harbor-exporter/rockcraft.yaml | 2 +- v2.10.2/harbor-jobservice/rockcraft.yaml | 2 +- v2.10.2/harbor-registryctl/rockcraft.yaml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/v2.10.2/harbor-core/rockcraft.yaml b/v2.10.2/harbor-core/rockcraft.yaml index e37d2c9..3443d3a 100644 --- a/v2.10.2/harbor-core/rockcraft.yaml +++ b/v2.10.2/harbor-core/rockcraft.yaml @@ -10,7 +10,7 @@ description: | Packages the core service of Harbor. license: Apache-2.0 -version: "2.10.2" +version: "v2.10.2" # NOTE(aznashwan): the base for the core image is VMware's Photon, # but rockcraft only currently supports bare/ubuntu-based bases. diff --git a/v2.10.2/harbor-db/rockcraft.yaml b/v2.10.2/harbor-db/rockcraft.yaml index 6965afc..108458c 100644 --- a/v2.10.2/harbor-db/rockcraft.yaml +++ b/v2.10.2/harbor-db/rockcraft.yaml @@ -11,7 +11,7 @@ description: | Packages the PostgreSQL DB of Harbor. license: Apache-2.0 -version: "2.10.2" +version: "v2.10.2" # NOTE(aznashwan): the base for the DB image is VMware's Photon, # but rockcraft only currently supports bare/ubuntu-based bases. 
diff --git a/v2.10.2/harbor-exporter/rockcraft.yaml b/v2.10.2/harbor-exporter/rockcraft.yaml index 7af2e09..0fd8d7b 100644 --- a/v2.10.2/harbor-exporter/rockcraft.yaml +++ b/v2.10.2/harbor-exporter/rockcraft.yaml @@ -10,7 +10,7 @@ description: | Packages the Expoter of Harbor. license: Apache-2.0 -version: "2.10.2" +version: "v2.10.2" # NOTE(aznashwan): the base for the expoter image is VMware's Photon, # but rockcraft only currently supports bare/ubuntu-based bases. diff --git a/v2.10.2/harbor-jobservice/rockcraft.yaml b/v2.10.2/harbor-jobservice/rockcraft.yaml index f3f6cea..8cd1f67 100644 --- a/v2.10.2/harbor-jobservice/rockcraft.yaml +++ b/v2.10.2/harbor-jobservice/rockcraft.yaml @@ -10,7 +10,7 @@ description: | Packages the Jobservice of Harbor. license: Apache-2.0 -version: "2.10.2" +version: "v2.10.2" # NOTE(aznashwan): the base for the jobservice image is VMware's Photon, # but rockcraft only currently supports bare/ubuntu-based bases. diff --git a/v2.10.2/harbor-registryctl/rockcraft.yaml b/v2.10.2/harbor-registryctl/rockcraft.yaml index 86133e8..5639d84 100644 --- a/v2.10.2/harbor-registryctl/rockcraft.yaml +++ b/v2.10.2/harbor-registryctl/rockcraft.yaml @@ -10,7 +10,7 @@ description: | Packages the Registryctl service of Harbor. license: Apache-2.0 -version: "2.10.2" +version: "v2.10.2" # NOTE(aznashwan): the base for the registryctl image is VMware's Photon, # but rockcraft only currently supports bare/ubuntu-based bases. From eec8f67147b1630362c17c7d78ac7309d524356e Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Thu, 18 Jul 2024 15:01:21 +0300 Subject: [PATCH 15/19] Fix sanity tests. 
Signed-off-by: Nashwan Azhari --- tests/sanity/test_harbor_db_v2_10_2.py | 2 +- tests/sanity/test_harbor_portal_v2_10_2.py | 17 +++++++++++++++-- tests/sanity/test_harbor_registryctl_v2_10_2.py | 2 +- v2.10.2/harbor-core/rockcraft.yaml | 4 ++-- v2.10.2/harbor-registryctl/rockcraft.yaml | 16 +++++++++------- 5 files changed, 28 insertions(+), 13 deletions(-) diff --git a/tests/sanity/test_harbor_db_v2_10_2.py b/tests/sanity/test_harbor_db_v2_10_2.py index e3c3300..4a8c270 100644 --- a/tests/sanity/test_harbor_db_v2_10_2.py +++ b/tests/sanity/test_harbor_db_v2_10_2.py @@ -30,7 +30,7 @@ def test_check_rock_contains_files(): image_files_to_check = [ "/var/lib/postgresql/data", - "/run/postgresq", + "/run/postgresql", "/docker-entrypoint.sh", "/initdb.sh", "/upgrade.sh", diff --git a/tests/sanity/test_harbor_portal_v2_10_2.py b/tests/sanity/test_harbor_portal_v2_10_2.py index 1a1ed3c..1cbfa5d 100644 --- a/tests/sanity/test_harbor_portal_v2_10_2.py +++ b/tests/sanity/test_harbor_portal_v2_10_2.py @@ -3,6 +3,7 @@ import logging import pytest +import re import sys from k8s_test_harness.util import docker_util @@ -51,8 +52,20 @@ def test_compare_rock_files_to_original(): rock_image_files = docker_util.list_files_under_container_image_dir( rock_image, root_dir=dir_to_check) - rock_fileset = set(rock_image_files) - original_fileset = set(original_image_files) + # NOTE(aznashwan): the names of main.js have randomized tags: + main_js_re = re.compile('(/usr/share/nginx/html/main\\..*\\.js)') + original_image_main = [ + f for f in original_image_files if main_js_re.match(f)] + rock_image_main = [ + f for f in rock_image_files if main_js_re.match(f)] + if original_image_main and not rock_image_main: + pytest.fail( + f"ROCK image seems to be missing a main.*.js file. " + f"Original image's main: {original_image_main}. 
All " + f"ROCK files under {dir_to_check}: {rock_image_files}") + + rock_fileset = set(rock_image_files) - set(rock_image_main) + original_fileset = set(original_image_files) - set(original_image_main) original_extra_files = original_fileset - rock_fileset if original_extra_files: diff --git a/tests/sanity/test_harbor_registryctl_v2_10_2.py b/tests/sanity/test_harbor_registryctl_v2_10_2.py index 9753f0b..52b9f3a 100644 --- a/tests/sanity/test_harbor_registryctl_v2_10_2.py +++ b/tests/sanity/test_harbor_registryctl_v2_10_2.py @@ -28,7 +28,7 @@ def test_compare_rock_files_to_original(): IMAGE_NAME, IMAGE_TAG, "amd64") rock_image = rock_meta.image - dir_to_check = "/harbor" + dir_to_check = "/home/harbor" original_image_files = docker_util.list_files_under_container_image_dir( ORIGINAL_IMAGE, root_dir=dir_to_check) diff --git a/v2.10.2/harbor-core/rockcraft.yaml b/v2.10.2/harbor-core/rockcraft.yaml index 3443d3a..1d6590d 100644 --- a/v2.10.2/harbor-core/rockcraft.yaml +++ b/v2.10.2/harbor-core/rockcraft.yaml @@ -127,7 +127,7 @@ parts: cp -r ./make/migrations "$OUTDIR/migrations" cp -r ./src/core/views "$OUTDIR/views" mkdir -p "$OUTDIR/icons" - cp -r ./icons "$OUTDIR/icons" + cp -r ./icons/* "$OUTDIR/icons" mkdir -p "$CRAFT_PART_INSTALL/etc/pki/tls/certs" chown -R 10000:10000 "$CRAFT_PART_INSTALL/etc/pki/tls/certs" @@ -140,7 +140,7 @@ parts: # the one used in the rockcraft workflows, so this is techinically a lie: # canonical/k8s-workflows/.github/workflows/scan_images.yaml MAKEFILE="$CRAFT_PART_SRC/Makefile" - VERSION_FILE="$CRAFT_PART_INSTALL/harbor/version" + VERSION_FILE="$CRAFT_PART_INSTALL/harbor/versions" echo "VERSION_TAG: v2.10.2" >> "$VERSION_FILE" sed -E -n "s/^(REGISTRYVERSION=(.*))$/REGISTRY_VERSION: \2/p" "$MAKEFILE" >> "$VERSION_FILE" sed -E -n "s/^(TRIVYVERSION=(.*))$/TRIVY_VERSION: \2/p" "$MAKEFILE" >> "$VERSION_FILE" diff --git a/v2.10.2/harbor-registryctl/rockcraft.yaml b/v2.10.2/harbor-registryctl/rockcraft.yaml index 5639d84..01aca86 100644 --- 
a/v2.10.2/harbor-registryctl/rockcraft.yaml +++ b/v2.10.2/harbor-registryctl/rockcraft.yaml @@ -29,11 +29,11 @@ services: # NOTE(aznashwan) set start.sh for compatibility with upstream image. # All it does is run `./make/photon/common/install_cert.sh` and exec `harbor_registryctl`. # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/registryctl/Dockerfile#L21 - command: /harbor/start.sh + command: /home/harbor/start.sh user: harbor group: harbor - working-dir: /harbor + working-dir: /home/harbor # TODO(aznashwan): original Docker image includes Healthcheck should/can we also? # https://github.com/goharbor/harbor/blob/v2.10.2/make/photon/registryctl/Dockerfile#L17 @@ -72,7 +72,7 @@ parts: cd $CRAFT_PART_SRC # Copy over auxiliary files: - OUTDIR="$CRAFT_PART_INSTALL/harbor" + OUTDIR="$CRAFT_PART_INSTALL/home/harbor" mkdir -p "$OUTDIR" cp ./make/photon/common/install_cert.sh "$OUTDIR/" @@ -153,9 +153,11 @@ parts: -o "$CRAFT_PART_BUILD/harbor_registryctl" # Copy over binary and set appropriate permissions: - mkdir -p $CRAFT_PART_INSTALL/harbor - cp $CRAFT_PART_BUILD/harbor_registryctl $CRAFT_PART_INSTALL/harbor + OUTDIR="$CRAFT_PART_INSTALL/home/harbor" + mkdir -p "$OUTDIR" + + cp $CRAFT_PART_BUILD/harbor_registryctl "$OUTDIR/" - chown 10000:10000 "$CRAFT_PART_INSTALL/harbor/harbor_registryctl" - chmod u+x "$CRAFT_PART_INSTALL/harbor/harbor_registryctl" + chown 10000:10000 "$OUTDIR/harbor_registryctl" + chmod u+x "$OUTDIR/harbor_registryctl" From ba55e5f00973261137c1a4eb6138c43db73157de Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Fri, 19 Jul 2024 13:57:10 +0300 Subject: [PATCH 16/19] Update conftest. 
Signed-off-by: Nashwan Azhari --- tests/integration/conftest.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 13128c5..1ca658c 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -1 +1,5 @@ -pytest_plugins = ["k8s_test_harness.conftest"] +# +# Copyright 2024 Canonical, Ltd. +# See LICENSE file for licensing details +# +pytest_plugins = ["k8s_test_harness.plugin"] From afaf4532b30a6a4a4b9caac9db2ad8bc692b9fd4 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Fri, 19 Jul 2024 13:58:52 +0300 Subject: [PATCH 17/19] Switch to using upstream Canonical GH workflows. Signed-off-by: Nashwan Azhari --- .github/workflows/pull_request.yaml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/pull_request.yaml b/.github/workflows/pull_request.yaml index 938707b..4f8180f 100644 --- a/.github/workflows/pull_request.yaml +++ b/.github/workflows/pull_request.yaml @@ -8,9 +8,7 @@ on: jobs: build-and-push-arch-specifics: name: Build Rocks and Push Arch Specific Images - # HACK(aznashwan): replace with upstream canonical-owned - # workflow once github.com/canonical/k8s-workflows/pull/13 merges. - uses: aznashwan/k8s-workflows/.github/workflows/build_rocks.yaml@main + uses: canonical/k8s-workflows/.github/workflows/build_rocks.yaml@main with: owner: ${{ github.repository_owner }} trivy-image-config: "trivy.yaml" @@ -22,9 +20,7 @@ jobs: platform-labels: '{"arm64": ["Ubuntu_ARM64_4C_16G_01"]}' run-tests: - # HACK(aznashwan): replace with upstream canonical-owned - # workflow once github.com/canonical/k8s-workflows/pull/15 merges. 
- uses: aznashwan/k8s-workflows/.github/workflows/run_tests.yaml@main +uses: canonical/k8s-workflows/.github/workflows/run_tests.yaml@main needs: [build-and-push-arch-specifics] secrets: inherit with: From 6ac5a97eb686f23361de71117bad4b5dc9fd741e Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Fri, 19 Jul 2024 14:13:06 +0300 Subject: [PATCH 18/19] Update READMEs. Signed-off-by: Nashwan Azhari --- README.md | 17 ++++++++++++++++- v2.10.2/harbor-core/README.md | 2 +- v2.10.2/harbor-db/README.md | 2 +- v2.10.2/harbor-jobservice/README.md | 2 +- v2.10.2/harbor-portal/README.md | 3 +++ v2.10.2/harbor-registryctl/README.md | 2 +- v2.10.2/nginx-photon/README.md | 3 +++ v2.10.2/redis-photon/README.md | 3 +++ v2.10.2/registry-photon/README.md | 3 +++ v2.10.2/trivy-adapter-photon/README.md | 3 +++ 10 files changed, 35 insertions(+), 5 deletions(-) create mode 100644 v2.10.2/harbor-portal/README.md create mode 100644 v2.10.2/nginx-photon/README.md create mode 100644 v2.10.2/redis-photon/README.md create mode 100644 v2.10.2/registry-photon/README.md create mode 100644 v2.10.2/trivy-adapter-photon/README.md diff --git a/README.md b/README.md index d95105a..04a87b2 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,17 @@ # harbor-rocks -Rocks for Harbor + +OCI images for [Harbor](https://github.com/goharbor/harbor) components built using [rockcraft](https://github.com/canonical/rockcraft). 
+ +Images are meant to be Ubuntu-based drop-in replacements for the following +upstream images: + +* docker.io/goharbor/nginx-photon +* docker.io/goharbor/harbor-portal +* docker.io/goharbor/harbor-core +* docker.io/goharbor/harbor-jobservice +* docker.io/goharbor/registry-photon +* docker.io/goharbor/harbor-registryctl +* docker.io/goharbor/trivy-adapter-photon +* docker.io/goharbor/harbor-db +* docker.io/goharbor/redis-photon +* docker.io/goharbor/harbor-exporter diff --git a/v2.10.2/harbor-core/README.md b/v2.10.2/harbor-core/README.md index 94846b0..e9b5c49 100644 --- a/v2.10.2/harbor-core/README.md +++ b/v2.10.2/harbor-core/README.md @@ -1,3 +1,3 @@ # ROCK specs for harbor-core. -Aims to be compatible with `docker.io/goharbor/harbor-core`. +Aims to be compatible with `docker.io/goharbor/harbor-core:v2.10.2`. diff --git a/v2.10.2/harbor-db/README.md b/v2.10.2/harbor-db/README.md index 3cb8a5d..2f57e55 100644 --- a/v2.10.2/harbor-db/README.md +++ b/v2.10.2/harbor-db/README.md @@ -1,3 +1,3 @@ # ROCK specs for harbor-db. -Aims to be compatible with `docker.io/goharbor/harbor-db`. +Aims to be compatible with `docker.io/goharbor/harbor-db:v2.10.2`. diff --git a/v2.10.2/harbor-jobservice/README.md b/v2.10.2/harbor-jobservice/README.md index 3aa44e1..793f8e8 100644 --- a/v2.10.2/harbor-jobservice/README.md +++ b/v2.10.2/harbor-jobservice/README.md @@ -1,3 +1,3 @@ # ROCK specs for harbor-jobservice. -Aims to be compatible with `docker.io/goharbor/harbor-jobservice`. +Aims to be compatible with `docker.io/goharbor/harbor-jobservice:v2.10.2`. diff --git a/v2.10.2/harbor-portal/README.md b/v2.10.2/harbor-portal/README.md new file mode 100644 index 0000000..efb04ee --- /dev/null +++ b/v2.10.2/harbor-portal/README.md @@ -0,0 +1,3 @@ +# ROCK specs for harbor-portal. + +Aims to be compatible with `docker.io/goharbor/harbor-portal:v2.10.2`. 
diff --git a/v2.10.2/harbor-registryctl/README.md b/v2.10.2/harbor-registryctl/README.md index ca33e10..0e7fbd2 100644 --- a/v2.10.2/harbor-registryctl/README.md +++ b/v2.10.2/harbor-registryctl/README.md @@ -1,3 +1,3 @@ # ROCK specs for harbor-registryctl. -Aims to be compatible with `docker.io/goharbor/harbor-registryctl`. +Aims to be compatible with `docker.io/goharbor/harbor-registryctl:v2.10.2`. diff --git a/v2.10.2/nginx-photon/README.md b/v2.10.2/nginx-photon/README.md new file mode 100644 index 0000000..c33d9e4 --- /dev/null +++ b/v2.10.2/nginx-photon/README.md @@ -0,0 +1,3 @@ +# ROCK specs for nginx-photon. + +Aims to be compatible with `docker.io/goharbor/nginx-photon:v2.10.2`. diff --git a/v2.10.2/redis-photon/README.md b/v2.10.2/redis-photon/README.md new file mode 100644 index 0000000..c105219 --- /dev/null +++ b/v2.10.2/redis-photon/README.md @@ -0,0 +1,3 @@ +# ROCK specs for redis-photon. + +Aims to be compatible with `docker.io/goharbor/redis-photon:v2.10.2`. diff --git a/v2.10.2/registry-photon/README.md b/v2.10.2/registry-photon/README.md new file mode 100644 index 0000000..9bde579 --- /dev/null +++ b/v2.10.2/registry-photon/README.md @@ -0,0 +1,3 @@ +# ROCK specs for registry-photon. + +Aims to be compatible with `docker.io/goharbor/registry-photon:v2.10.2`. diff --git a/v2.10.2/trivy-adapter-photon/README.md b/v2.10.2/trivy-adapter-photon/README.md new file mode 100644 index 0000000..9a44c51 --- /dev/null +++ b/v2.10.2/trivy-adapter-photon/README.md @@ -0,0 +1,3 @@ +# ROCK specs for trivy-adapter-photon. + +Aims to be compatible with `docker.io/goharbor/trivy-adapter-photon:v2.10.2`. From a1c7ccbbce156d58b98a0f7d796a47a8f691ae4f Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Fri, 19 Jul 2024 15:12:15 +0300 Subject: [PATCH 19/19] Update ARM workflow runners platform-labels. 
Signed-off-by: Nashwan Azhari --- .github/workflows/pull_request.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pull_request.yaml b/.github/workflows/pull_request.yaml index 4f8180f..3ebb6d4 100644 --- a/.github/workflows/pull_request.yaml +++ b/.github/workflows/pull_request.yaml @@ -17,7 +17,7 @@ jobs: # pinning to use rockcraft 1.3.0 feature `entrypoint-service` rockcraft-revisions: '{"amd64": "1783", "arm64": "1784"}' arch-skipping-maximize-build-space: '["arm64"]' - platform-labels: '{"arm64": ["Ubuntu_ARM64_4C_16G_01"]}' + platform-labels: '{"arm64": ["self-hosted", "Linux", "ARM64", "jammy"]}' run-tests: uses: canonical/k8s-workflows/.github/workflows/run_tests.yaml@main