Knative official docs
Migrating a Cloud Run for Anthos service to Knative
https://cloud.google.com/anthos/run/docs/migrate/knative
# install knative cli
https://knative.dev/docs/client/install-kn/#install-the-knative-cli
brew install knative/client/kn
kn version
# install knative operator
check latest release
https://github.com/knative/operator/releases
kubectl apply -f https://github.com/knative/operator/releases/download/knative-v1.10.2/operator.yaml
# check
kubectl get deployment knative-operator
# output (this is the output of the earlier "kubectl apply", not of the get command)
secret "operator-webhook-certs" created
deployment.apps "operator-webhook" created
service "operator-webhook" created
customresourcedefinition.apiextensions.k8s.io "knativeeventings.operator.knative.dev" created
customresourcedefinition.apiextensions.k8s.io "knativeservings.operator.knative.dev" created
clusterrole.rbac.authorization.k8s.io "knative-serving-operator-aggregated" created
clusterrole.rbac.authorization.k8s.io "knative-serving-operator-aggregated-stable" created
clusterrole.rbac.authorization.k8s.io "knative-eventing-operator-aggregated" created
clusterrole.rbac.authorization.k8s.io "knative-eventing-operator-aggregated-stable" created
clusterrole.rbac.authorization.k8s.io "knative-serving-operator" created
clusterrole.rbac.authorization.k8s.io "knative-eventing-operator" created
serviceaccount "knative-operator" created
clusterrolebinding.rbac.authorization.k8s.io "knative-serving-operator" created
clusterrolebinding.rbac.authorization.k8s.io "knative-eventing-operator" created
role.rbac.authorization.k8s.io "knative-operator-webhook" created
clusterrole.rbac.authorization.k8s.io "knative-operator-webhook" created
serviceaccount "operator-webhook" created
rolebinding.rbac.authorization.k8s.io "operator-webhook" created
clusterrolebinding.rbac.authorization.k8s.io "operator-webhook" created
clusterrolebinding.rbac.authorization.k8s.io "knative-serving-operator-aggregated" created
clusterrolebinding.rbac.authorization.k8s.io "knative-serving-operator-aggregated-stable" created
clusterrolebinding.rbac.authorization.k8s.io "knative-eventing-operator-aggregated" created
clusterrolebinding.rbac.authorization.k8s.io "knative-eventing-operator-aggregated-stable" created
configmap "config-logging" created
configmap "config-observability" created
deployment.apps "knative-operator" created
# install istio
-> Knative requires Istio as its networking layer, so Istio must be installed first.
## install istio with istio operator
https://istio.io/latest/docs/setup/install/operator/
cf. IstioOperator options
https://istio.io/latest/docs/reference/config/istio.operator.v1alpha1/
https://istio.io/v1.5/docs/reference/config/installation-options/
istioctl operator init
-> istio-operator related resources are created in the istio-operator namespace.
-> Istio is automatically deployed when an IstioOperator resource is created.
-> If the IstioOperator changes after the initial deployment of istio, the istio operator reflects the changes to istio in real time.
kubectl apply -f - <<EOF
# IstioOperator CR: deploys Istio (demo profile) and keeps it reconciled
# by the istio-operator. Helm-style overrides go under spec.values.
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
metadata:
  namespace: istio-system
  name: istiocontrolplane
spec:
  profile: demo
  meshConfig:
    # enable Envoy access logging to container stdout
    accessLogFile: /dev/stdout
  values:
    pilot:
      enabled: true
      autoscaleEnabled: true
      autoscaleMin: 1
      autoscaleMax: 5
      resources:
        requests:
          cpu: 500m
          memory: 2048Mi
        limits:
          cpu: 500m
          memory: 2048Mi
    gateways:
      istio-ingressgateway:
        autoscaleEnabled: true
        # expose the ingress gateway via an external load balancer
        type: LoadBalancer
      istio-egressgateway:
        autoscaleEnabled: true
EOF
kubectl get IstioOperator -n istio-system
kubectl label namespace default istio-injection=enabled
kubectl get ns --show-labels
# install KnativeServing
-> All knative serving-related settings are applied through the KnativeServing resource.
-> The configmap of the knative-serving namespace is automatically modified (do not change it manually, just modify the KnativeServing resource).
-> KnativeServing resource automatically creates istio's gateway resource in the knative-serving namespace.
https://knative.dev/docs/serving/
https://knative.dev/docs/install/operator/configuring-serving-cr/
https://knative.dev/docs/serving/setting-up-custom-ingress-gateway/
https://knative.dev/docs/install/operator/configuring-with-operator/
list of configmaps
https://github.com/knative/serving/tree/main/config/core/configmaps
vim knative-serving.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: knative-serving
---
# KnativeServing CR: the operator materializes each spec.config.<name>
# entry into the corresponding config-<name> ConfigMap in knative-serving.
apiVersion: operator.knative.dev/v1beta1
kind: KnativeServing
metadata:
  name: knative-serving
  namespace: knative-serving
spec:
  config:
    # config-domain: map of domain -> selector (the selector is a YAML
    # string, hence the | block scalar). Services whose labels match a
    # selector get that domain; "" marks the default domain.
    domain:
      svc.cluster.local: |
        selector:
          type: internal
      prod.127.0.0.1.nip.io: |
        selector:
          env: prod
      dev.127.0.0.1.nip.io: |
        selector:
          env: dev
      test.127.0.0.1.nip.io: ""
    # config-autoscaler: global revision scale bounds (quoted — ConfigMap
    # values must be strings)
    autoscaler:
      min-scale: "1"
      max-scale: "10"
kubectl apply -f knative-serving.yaml
# check
kubectl get KnativeServing knative-serving -n knative-serving
# check component of KnativeServing
kubectl get deployment -n knative-serving
# output
NAME READY UP-TO-DATE AVAILABLE AGE
activator 1/1 1 1 113m
autoscaler 1/1 1 1 113m
autoscaler-hpa 1/1 1 1 113m
controller 1/1 1 1 113m
domain-mapping 1/1 1 1 113m
domainmapping-webhook 1/1 1 1 113m
net-istio-controller 1/1 1 1 110m
net-istio-webhook 1/1 1 1 110m
webhook 1/1 1 1 113m
# check configmap of KnativeServing
kubectl get cm -n knative-serving
# deploy "helloworld" sample service
-> When creating a knative service, resources such as deployment and virtual service are automatically created.
Deploying a Knative Service
https://knative.dev/docs/getting-started/first-service/
Knative code samples
https://github.com/knative/docs/tree/main/code-samples/serving
vim hello.yaml
# Minimal Knative Service; creates Configuration, Route, Revision,
# Deployment and Istio VirtualServices automatically.
apiVersion: serving.knative.dev/v1
kind: Service
metadata:
  name: hello
spec:
  template:
    spec:
      containers:
        - image: ghcr.io/knative/helloworld-go:latest
          ports:
            - containerPort: 8080
          env:
            - name: TARGET
              value: "World"
kubectl apply -f hello.yaml
# check access domain
kubectl get route
-> kn route list
# output
NAME URL READY REASON
hello http://hello.default.test.127.0.0.1.nip.io True
# check list of knative services
kubectl get ksvc
-> kn service list
# output
NAME URL LATESTCREATED LATESTREADY READY REASON
hello http://hello.default.test.127.0.0.1.nip.io hello-00001 hello-00001 True
# check list of revisions
kubectl get rev
-> kn revision list
# output
NAME CONFIG NAME K8S SERVICE NAME GENERATION READY REASON ACTUAL REPLICAS DESIRED REPLICAS
hello-00001 hello 1 True 1 1
# Create a sample service with a dev domain
-> If a Service's metadata.labels match the selector of a domain entry in config-domain, that domain is assigned to the service.
vim hello-dev.yaml
# Same sample service, labeled env=dev so it matches the
# dev.127.0.0.1.nip.io selector in config-domain.
apiVersion: serving.knative.dev/v1
kind: Service
metadata:
  name: hello-dev
  labels:
    env: dev
spec:
  template:
    spec:
      containers:
        - image: ghcr.io/knative/helloworld-go:latest
          ports:
            - containerPort: 8080
          env:
            - name: TARGET
              value: "World"
# check access domain
kubectl get route
# output
NAME URL READY REASON
hello-dev http://hello-dev.default.dev.127.0.0.1.nip.io True
# check virtual service
kubectl get vs | grep hello-dev
# output
hello-dev-ingress ["knative-serving/knative-ingress-gateway","knative-serving/knative-local-gateway"] ["hello-dev.default","hello-dev.default.dev.127.0.0.1.nip.io","hello-dev.default.svc","hello-dev.default.svc.cluster.local"] 7m13s
hello-dev-mesh ["mesh"] ["hello-dev.default","hello-dev.default.svc","hello-dev.default.svc.cluster.local"] 7m13s
# Create a sample service with a local domain
vim hello-local.yaml
# Same sample service, labeled type=internal so it matches the
# svc.cluster.local selector in config-domain (cluster-local only).
apiVersion: serving.knative.dev/v1
kind: Service
metadata:
  name: hello-local
  labels:
    type: internal
spec:
  template:
    spec:
      containers:
        - image: ghcr.io/knative/helloworld-go:latest
          ports:
            - containerPort: 8080
          env:
            - name: TARGET
              value: "World"
# check access domain
kubectl get route
# output
NAME URL READY REASON
hello-local http://hello-local.default.svc.cluster.local True
# check virtual service
kubectl get vs | grep hello-local
# output
hello-local-ingress ["knative-serving/knative-local-gateway"] ["hello-local.default","hello-local.default.svc","hello-local.default.svc.cluster.local"] 6m44s
hello-local-mesh ["mesh"] ["hello-local.default","hello-local.default.svc","hello-local.default.svc.cluster.local"] 6m44s
# deletion
## delete knative resources
kubectl delete -f hello.yaml
kubectl delete -f hello-dev.yaml
kubectl delete -f hello-local.yaml
kubectl delete KnativeServing knative-serving -n knative-serving
## delete istio
kubectl delete IstioOperator -n istio-system istiocontrolplane
istioctl uninstall -y --purge
kubectl delete ns istio-system istio-operator --grace-period=0 --force
## delete knative operator
kubectl delete -f https://github.com/knative/operator/releases/download/knative-v1.10.2/operator.yaml
## delete knative crd
kubectl api-resources -o name | grep knative | xargs kubectl delete crd
'kubernetes' 카테고리의 다른 글
Istio CNI 플러그인과 Pod Security Admission (1) | 2024.06.15 |
---|---|
ksniff 로 kubernetes 컨테이너 패킷 캡쳐 (1) | 2024.04.06 |
[cdk8s] Define k8s yaml file in programming language (0) | 2023.09.08 |
kube API server and kubernetes python client (0) | 2023.08.13 |
kops k8s 클러스터 관리도구 (0) | 2023.04.10 |