Skip to content

Commit

Permalink
[Addon kubevela#579] Reformat cue file via cue fmt command
Browse files Browse the repository at this point in the history
Signed-off-by: yanghua <[email protected]>
  • Loading branch information
yanghua committed Feb 28, 2023
1 parent 843319f commit 06c2795
Show file tree
Hide file tree
Showing 3 changed files with 131 additions and 131 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -11,11 +11,11 @@ template: {
// +usage=Specify the spark application name
name: string
// +usage=Specify the namespace for spark application to install
namespace: string
namespace: string
// +usage=Specify the application language type, e.g. "Scala", "Python", "Java" or "R"
type: string
// +usage=Specify the python version
pythonVersion ?: string
pythonVersion?: string
// +usage=Specify the deploy mode, e.g. "cluster", "client" or "in-cluster-client"
mode: string
// +usage=Specify the container image for the driver, executor, and init-container
Expand All @@ -33,58 +33,57 @@ template: {
// +usage=Specify the number of CPU cores to request for the executor pod
executorCores: int
// +usage=Specify a list of arguments to be passed to the application
arguments ?: [...string]
arguments?: [...string]
// +usage=Specify the config information carries user-specified Spark configuration properties as they would use the "--conf" option in spark-submit
sparkConf ?: [string]: string
sparkConf?: [string]: string
// +usage=Specify the config information carries user-specified Hadoop configuration properties as they would use the "--conf" option in spark-submit. The SparkApplication controller automatically adds prefix "spark.hadoop." to Hadoop configuration properties
hadoopConf ?: [string]: string
hadoopConf?: [string]: string
// +usage=Specify the name of the ConfigMap containing Spark configuration files such as log4j.properties. The controller will add environment variable SPARK_CONF_DIR to the path where the ConfigMap is mounted to
sparkConfigMap ?: string
sparkConfigMap?: string
// +usage=Specify the name of the ConfigMap containing Hadoop configuration files such as core-site.xml. The controller will add environment variable HADOOP_CONF_DIR to the path where the ConfigMap is mounted to
hadoopConfigMap ?: string

hadoopConfigMap?: string
}

output: {
kind: "ClusterRoleBinding"
apiVersion: "rbac.authorization.k8s.io/v1"
metadata: name: parameter.name
roleRef: {
name: "edit"
apiGroup: "rbac.authorization.k8s.io"
kind: "ClusterRole"
}
subjects: [{
name: "default"
kind: "ServiceAccount"
namespace: parameter.namespace
}]
}
output: {
kind: "ClusterRoleBinding"
apiVersion: "rbac.authorization.k8s.io/v1"
metadata: name: parameter.name
roleRef: {
name: "edit"
apiGroup: "rbac.authorization.k8s.io"
kind: "ClusterRole"
}
subjects: [{
name: "default"
kind: "ServiceAccount"
namespace: parameter.namespace
}]
}

outputs: {

"spark": {
kind: "SparkApplication"
apiVersion: "sparkoperator.k8s.io/v1beta2"
metadata: {
name: parameter.name
kind: "SparkApplication"
apiVersion: "sparkoperator.k8s.io/v1beta2"
metadata: {
name: parameter.name
namespace: parameter.namespace
}
spec: {
type: parameter.type
mode: parameter.mode
image: parameter.image
imagePullPolicy: parameter.imagePullPolicy
mainClass: parameter.mainClass
}
spec: {
type: parameter.type
mode: parameter.mode
image: parameter.image
imagePullPolicy: parameter.imagePullPolicy
mainClass: parameter.mainClass
mainApplicationFile: parameter.mainApplicationFile
sparkVersion: parameter.sparkVersion
sparkVersion: parameter.sparkVersion
driver: {
cores: parameter.driverCores
cores: parameter.driverCores
}
executor: {
cores: parameter.executorCores
cores: parameter.executorCores
}
}
}
}
}
}
14 changes: 7 additions & 7 deletions experimental/addons/spark-kubernetes-operator/parameter.cue
Original file line number Diff line number Diff line change
Expand Up @@ -6,15 +6,15 @@
// container images, ports, and etc.
parameter: {
// +usage=Deploy to specified clusters. Leave empty to deploy to all clusters.
clusters?: [...string]
clusters?: [...string]
// +usage=Namespace to deploy to
namespace: *"spark-operator" | string
namespace: *"spark-operator" | string
// +usage=Specify if create the webhook or not
"createWebhook": *false | bool
"createWebhook": *false | bool
// +usage=Specify the image repository
"imageRepository": *"ghcr.io/googlecloudplatform/spark-operator" | string
// +usage=Specify the image tag
"imageTag": *"v1beta2-1.3.8-3.1.1" | string
"imageRepository": *"ghcr.io/googlecloudplatform/spark-operator" | string
// +usage=Specify the image tag
"imageTag": *"v1beta2-1.3.8-3.1.1" | string
// +usage=Specify if create the sa for job or not
"createSparkServiceAccount": *false|bool
"createSparkServiceAccount": *false | bool
}
173 changes: 87 additions & 86 deletions experimental/addons/spark-kubernetes-operator/template.cue
Original file line number Diff line number Diff line change
@@ -1,101 +1,102 @@
package main

output: {
apiVersion: "core.oam.dev/v1beta1"
kind: "Application"
spec: {
components: [
{
type: "k8s-objects"
name: "spark-operator-ns"
properties: objects: [{
apiVersion: "v1"
kind: "Namespace"
metadata: name: parameter.namespace
}]
},
{
type: "k8s-objects"
name: "spark-cluster-ns"
properties: objects: [{
apiVersion: "v1"
kind: "Namespace"
metadata: name: "spark-cluster"
},
{
apiVersion: "v1"
kind: "ServiceAccount"
metadata: {
name: "spark"
namespace: "spark-cluster"
}
}]
},
{
name: "spark-operator-helm"
type: "helm"
dependsOn: ["spark-operator-ns"]
type: "helm"
properties: {
repoType: "helm"
url: "https://googlecloudplatform.github.io/spark-on-k8s-operator/"
chart: "spark-operator"
targetNamespace: parameter["namespace"]
version: "1.1.26"
values: {
image: {
repository: parameter["imageRepository"]
tag: parameter["imageTag"]
}
type: "k8s-objects"
name: "spark-operator-ns"
properties: objects: [{
apiVersion: "v1"
kind: "Namespace"
metadata: name: parameter.namespace
}]
},
{
type: "k8s-objects"
name: "spark-cluster-ns"
properties: objects: [{
apiVersion: "v1"
kind: "Namespace"
metadata: name: "spark-cluster"
},
{
apiVersion: "v1"
kind: "ServiceAccount"
metadata: {
name: "spark"
namespace: "spark-cluster"
}
}]
},
{
name: "spark-operator-helm"
type: "helm"
dependsOn: ["spark-operator-ns"]
type: "helm"
properties: {
repoType: "helm"
url: "https://googlecloudplatform.github.io/spark-on-k8s-operator/"
chart: "spark-operator"
targetNamespace: parameter["namespace"]
version: "1.1.26"
values: {
image: {
repository: parameter["imageRepository"]
tag: parameter["imageTag"]
}

serviceAccounts: {
spark: {
create: parameter["createSparkServiceAccount"]
}
}
serviceAccounts: {
spark: {
create: parameter["createSparkServiceAccount"]
}
}

serviceAccounts: {
sparkoperator: {
name: "spark-kubernetes-operator"
}
}
serviceAccounts: {
sparkoperator: {
name: "spark-kubernetes-operator"
}
}

webhook: {
enable: parameter["createWebhook"]
}
}
}
},
webhook: {
enable: parameter["createWebhook"]
}
}
}
},
]

policies: [
{
name: "gc-dependency",
type: "garbage-collect",
properties: {
order: "dependency"
}
}
{
type: "shared-resource"
name: "shared-resource-via-namespace"
properties: rules: [{
selector: resourceTypes: ["Namespace"]
}]
}
{
type: "topology"
name: "deploy-operator"
properties: {
namespace: parameter.namespace
if parameter.clusters != _|_ {
clusters: parameter.clusters
}
if parameter.clusters == _|_ {
clusterLabelSelector: {}
}
}
}
{
name: "gc-dependency"
type: "garbage-collect"
properties: {
order: "dependency"
}
},
{
type: "shared-resource"
name: "shared-resource-via-namespace"
properties: rules: [{
selector: resourceTypes: ["Namespace"]
}]
},
{
type: "topology"
name: "deploy-operator"
properties: {
namespace: parameter.namespace
if parameter.clusters != _|_ {
clusters: parameter.clusters
}

if parameter.clusters == _|_ {
clusterLabelSelector: {}
}
}
},
]
}
}

0 comments on commit 06c2795

Please sign in to comment.