docs(installation): knative (#3860)

* added initial knative docu

* added menu bar

* simplified installation

* normalized versions to latest

* update envs

* update envs

* Update zitadel-knative-service.yaml

* Update knative.mdx

* Update knative.mdx

* move knative into correct folder

Co-authored-by: Livio Spring <livio.a@gmail.com>
Christian Jakob 2022-06-30 13:28:18 +02:00 committed by GitHub
parent 9c0f494bf2
commit 1cc09a85f0
4 changed files with 319 additions and 1 deletion


@@ -0,0 +1,181 @@
# Generated file, DO NOT EDIT. Source: cloud/kubernetes/templates/cockroachdb-statefulset.yaml
apiVersion: v1
kind: Service
metadata:
  # This service is meant to be used by clients of the database. It exposes a ClusterIP that will
  # automatically load balance connections to the different database pods.
  name: cockroachdb-public
  labels:
    app: cockroachdb
spec:
  ports:
  # The main port, served by gRPC, serves Postgres-flavor SQL, internode
  # traffic and the cli.
  - port: 26257
    targetPort: 26257
    name: grpc
  # The secondary port serves the UI as well as health and debug endpoints.
  - port: 8080
    targetPort: 8080
    name: http
  selector:
    app: cockroachdb
---
apiVersion: v1
kind: Service
metadata:
  # This service only exists to create DNS entries for each pod in the stateful
  # set such that they can resolve each other's IP addresses. It does not
  # create a load-balanced ClusterIP and should not be used directly by clients
  # in most circumstances.
  name: cockroachdb
  labels:
    app: cockroachdb
  annotations:
    # Use this annotation in addition to the actual publishNotReadyAddresses
    # field below because the annotation will stop being respected soon but the
    # field is broken in some versions of Kubernetes:
    # https://github.com/kubernetes/kubernetes/issues/58662
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
    # Enable automatic monitoring of all instances when Prometheus is running in the cluster.
    prometheus.io/scrape: "true"
    prometheus.io/path: "_status/vars"
    prometheus.io/port: "8080"
spec:
  ports:
  - port: 26257
    targetPort: 26257
    name: grpc
  - port: 8080
    targetPort: 8080
    name: http
  # We want all pods in the StatefulSet to have their addresses published for
  # the sake of the other CockroachDB pods even before they're ready, since they
  # have to be able to talk to each other in order to become ready.
  publishNotReadyAddresses: true
  clusterIP: None
  selector:
    app: cockroachdb
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: cockroachdb-budget
  labels:
    app: cockroachdb
spec:
  selector:
    matchLabels:
      app: cockroachdb
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: cockroachdb
spec:
  serviceName: "cockroachdb"
  replicas: 1
  selector:
    matchLabels:
      app: cockroachdb
  template:
    metadata:
      labels:
        app: cockroachdb
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - cockroachdb
              topologyKey: kubernetes.io/hostname
      containers:
      - name: cockroachdb
        image: cockroachdb/cockroach:v22.1.2
        imagePullPolicy: IfNotPresent
        # TODO: Change these to appropriate values for the hardware that you're running. You can see
        # the resources that can be allocated on each of your Kubernetes nodes by running:
        #   kubectl describe nodes
        # Note that requests and limits should have identical values.
        resources:
          requests:
            cpu: "1"
            memory: "4Gi"
          limits:
            cpu: "1"
            memory: "4Gi"
        ports:
        - containerPort: 26257
          name: grpc
        - containerPort: 8080
          name: http
        # We recommend that you do not configure a liveness probe on a production environment, as this can impact the availability of production databases.
        # livenessProbe:
        #   httpGet:
        #     path: "/health"
        #     port: http
        #   initialDelaySeconds: 30
        #   periodSeconds: 5
        readinessProbe:
          httpGet:
            path: "/health?ready=1"
            port: http
          initialDelaySeconds: 10
          periodSeconds: 5
          failureThreshold: 2
        volumeMounts:
        - name: datadir
          mountPath: /cockroach/cockroach-data
        env:
        - name: COCKROACH_CHANNEL
          value: kubernetes-insecure
        - name: GOMAXPROCS
          valueFrom:
            resourceFieldRef:
              resource: limits.cpu
              divisor: "1"
        - name: MEMORY_LIMIT_MIB
          valueFrom:
            resourceFieldRef:
              resource: limits.memory
              divisor: "1Mi"
        command:
          - "/bin/bash"
          - "-ecx"
          # The use of qualified `hostname -f` is crucial:
          # Other nodes aren't able to look up the unqualified hostname.
          - exec
            /cockroach/cockroach
            start-single-node
            --logtostderr
            --insecure
            --advertise-host $(hostname -f)
            --http-addr 0.0.0.0
            --cache $(expr $MEMORY_LIMIT_MIB / 4)MiB
            --max-sql-memory $(expr $MEMORY_LIMIT_MIB / 4)MiB
      # No pre-stop hook is required, a SIGTERM plus some time is all that's
      # needed for graceful shutdown of a node.
      terminationGracePeriodSeconds: 60
      volumes:
      - name: datadir
        persistentVolumeClaim:
          claimName: datadir
  podManagementPolicy: Parallel
  updateStrategy:
    type: RollingUpdate
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes:
        - "ReadWriteOnce"
      resources:
        requests:
          storage: 100Gi


@@ -0,0 +1,44 @@
apiVersion: serving.knative.dev/v1
kind: Service
metadata:
  creationTimestamp: null
  name: zitadel
spec:
  template:
    metadata:
      annotations:
        client.knative.dev/user-image: ghcr.io/zitadel/zitadel:v2.0.0-v2-alpha.33-amd64
      creationTimestamp: null
    spec:
      containerConcurrency: 0
      containers:
      - args:
        - admin
        - start-from-init
        - --masterkey
        - MasterkeyNeedsToHave32Characters
        env:
        - name: ZITADEL_DATABASE_HOST
          value: cockroachdb
        - name: ZITADEL_EXTERNALSECURE
          value: "false"
        - name: ZITADEL_TLS_ENABLED
          value: "false"
        - name: ZITADEL_EXTERNALPORT
          value: "80"
        - name: ZITADEL_EXTERNALDOMAIN
          value: zitadel.default.127.0.0.1.sslip.io
        - name: ZITADEL_S3DEFAULTINSTANCE_CUSTOMDOMAIN
          value: zitadel.default.127.0.0.1.sslip.io
        image: ghcr.io/zitadel/zitadel:v2.0.0-v2-alpha.33-amd64
        name: user-container
        ports:
        - containerPort: 8080
          protocol: TCP
        readinessProbe:
          successThreshold: 1
          tcpSocket:
            port: 0
        resources: {}
      enableServiceLinks: false
      timeoutSeconds: 300


@@ -9,6 +9,7 @@ import MacOS from './macos.mdx'
 import Windows from './windows.mdx'
 import Compose from './compose.mdx'
 import Helm from './helm.mdx'
+import Knative from './knative.mdx'
 import NextSelfHosted from './nextselfhosted.mdx'
 
 # Run ZITADEL
@@ -27,7 +28,8 @@ By default, it runs a highly available ZITADEL instance along with a secure and
     {'label': 'Linux', 'value': 'linux'},
     {'label': 'MacOS', 'value': 'macos'},
     {'label': 'Docker Compose', 'value': 'compose'},
-    {'label': 'Kubernetes (Helm)', 'value': 'k8s'}
+    {'label': 'Kubernetes (Helm)', 'value': 'k8s'},
+    {'label': 'Knative', 'value': 'knative'}
   ]}
 >
 <TabItem value="saas">
@@ -49,4 +51,7 @@ By default, it runs a highly available ZITADEL instance along with a secure and
   <Helm/>
   <NextSelfHosted/>
 </TabItem>
+<TabItem value="knative">
+  <Knative/>
+</TabItem>
 </Tabs>


@@ -0,0 +1,88 @@
## Disclaimer

This guide is for development and demonstration purposes only and does NOT reflect a production setup.

## New Knative environment

### Download and run Knative quickstart

Follow the Knative quickstart guide to get a local kind/minikube environment with Knative capabilities.
On macOS it boils down to four commands:
```bash
# install the Knative CLI
brew install knative/client/kn
# install the Knative quickstart plugin
brew install knative-sandbox/kn-plugins/quickstart
# install kind
brew install kind
# create the quickstart cluster on kind
kn quickstart kind
```
That gives you a ready-to-go Knative/Kubernetes environment.

See the Knative documentation for details:
https://knative.dev/docs/install/quickstart-install/
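
Before continuing, you can optionally check that Knative Serving came up; the quickstart installs it into the `knative-serving` namespace:

```bash
# the Knative Serving pods installed by the quickstart should all be Running
kubectl get pods -n knative-serving
```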
## Database

Start a single-node CockroachDB as a StatefulSet:
```bash
kubectl apply -f https://raw.githubusercontent.com/zitadel/zitadel/v2-alpha/deploy/knative/cockroachdb-statefulset-single-node.yaml
```
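
CockroachDB takes a moment to become ready. To wait for the pod (selected by the `app: cockroachdb` label from the manifest above):

```bash
# block until the single-node StatefulSet has rolled out
kubectl rollout status statefulset/cockroachdb
# the pod should report 1/1 Ready
kubectl get pods -l app=cockroachdb
```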
## Secret for TLS termination

Create a secret with your certificates for TLS termination:

```bash
# apply a manifest that contains your TLS certificate secret
kubectl apply -f certs.yaml
```
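
If you don't have a `certs.yaml` at hand, the secret can also be created directly from local certificate files. The secret name `certs` and the keys `tls.secret` and `tls.key` match the `--mount` flags used in the next step; the file names `server.crt` and `server.key` are just placeholders for your own certificate and key.

```bash
# create the secret referenced by the --mount flags below (file names are examples)
kubectl create secret generic certs \
  --from-file=tls.secret=./server.crt \
  --from-file=tls.key=./server.key
```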
## Start ZITADEL with Knative
```bash
# start zitadel
kn service create zitadel \
--image ghcr.io/zitadel/zitadel:v2.0.0-v2-alpha.33-amd64 \
--port 8080 \
--env ZITADEL_DATABASE_HOST=cockroachdb \
--env ZITADEL_EXTERNALSECURE=false \
--env ZITADEL_EXTERNALPORT=80 \
--env ZITADEL_TLS_ENABLED=false \
--env ZITADEL_EXTERNALDOMAIN=zitadel.default.127.0.0.1.sslip.io \
--env ZITADEL_S3DEFAULTINSTANCE_CUSTOMDOMAIN=zitadel.default.127.0.0.1.sslip.io \
--arg "admin" --arg "start-from-init" --arg "--masterkey" --arg "MasterkeyNeedsToHave32Characters" \
--mount /tls.secret=secret:certs/tls.secret \
--mount /tls.key=secret:certs/tls.key
```
Or apply the prepared Knative service YAML instead:
```bash
kubectl apply -f https://raw.githubusercontent.com/zitadel/zitadel/v2-alpha/deploy/knative/zitadel-knative-service.yaml
```
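
Either way, you can watch the service become ready with the Knative CLI:

```bash
# shows the URL, the latest revision and the readiness conditions
kn service describe zitadel
```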
## Get started with ZITADEL

```bash
# get the ZITADEL URL
kn services list
NAME URL LATEST AGE CONDITIONS READY REASON
zitadel http://zitadel.default.127.0.0.1.sslip.io zitadel-00001 10m 3 OK / 3 True
```
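
You can also check reachability from the command line. Assuming the default domain used above, ZITADEL's health endpoint should answer on the service URL:

```bash
# a 200 response means the instance is up
curl -i http://zitadel.default.127.0.0.1.sslip.io/debug/healthz
```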
Add the console path to the URL and open it in your browser:
http://zitadel.default.127.0.0.1.sslip.io/ui/console

Unless you configured something else, this is the default IAM admin user's login:

* username: zitadel-admin@zitadel.zitadel.default.127.0.0.1.sslip.io
* password: Password1!