[kubeadm] WARNING: kubeadm is in alpha, please do not use it for production clusters.
[preflight] Running pre-flight checks
[init] Using Kubernetes version: v1.7.6
[tokens] Generated token: "67e411.zc3617bb21ad7ee3"
[certificates] Generated Certificate Authority key and certificate.
[certificates] Generated API Server key and certificate
[certificates] Generated Service Account signing keys
[certificates] Created keys and certificates in "/etc/kubernetes/pki"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/controller-manager.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/scheduler.conf"
[apiclient] Created API client, waiting for the control plane to become ready
[apiclient] All control plane components are healthy after 21.317580 seconds
[apiclient] Waiting for at least one node to register and become ready
[apiclient] First node is ready after 6.556101 seconds
[apiclient] Creating a test deployment
[apiclient] Test deployment succeeded
[addons] Created essential addon: kube-proxy
Your Kubernetes master has initialized successfully!
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
http://kubernetes.io/docs/admin/addons/
You can now join any number of machines by running the following on each node:
kubeadm join --token=67e411.zc3617bb21ad7ee3 172.16.2.1

# Calico Version v2.5.1
# https://docs.projectcalico.org/v2.5/releases#v2.5.1
# This manifest includes the following component versions:
#   calico/node:v2.5.1
#   calico/cni:v1.10.0
#   calico/kube-policy-controller:v0.7.0

# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # The location of your etcd cluster. This uses the Service clusterIP defined below.
  etcd_endpoints: "http://172.16.2.1:2379,http://172.16.2.2:2379,http://172.16.2.3:2379"

  # Configure the Calico backend to use.
  calico_backend: "bird"

  # The CNI network configuration to install on each node.
  cni_network_config: |-
    {
        "name": "k8s-pod-network",
        "cniVersion": "0.1.0",
        "type": "calico",
        "etcd_endpoints": "__ETCD_ENDPOINTS__",
        "log_level": "info",
        "mtu": 1500,
        "ipam": {
            "type": "calico-ipam"
        },
        "policy": {
            "type": "k8s",
            "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
            "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
        },
        "kubernetes": {
            "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
        }
    }

---

# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
        # reserves resources for critical add-on pods so that they can be rescheduled after
        # a failure. This annotation works in tandem with the toleration below.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
        # This, along with the annotation above marks this pod as a critical add-on.
        - key: CriticalAddonsOnly
          operator: Exists
      serviceAccountName: calico-cni-plugin
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: quay.io/calico/node:v2.5.1
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Enable BGP. Disable to enforce policy only.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "kubeadm,bgp"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Configure the IP Pool from which Pod IPs will be chosen.
            - name: CALICO_IPV4POOL_CIDR
              value: "10.68.0.0/16"
            - name: CALICO_IPV4POOL_IPIP
              value: "always"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              value: "1440"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            # Auto-detect the BGP IP address.
            - name: IP
              value: ""
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9099
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: quay.io/calico/cni:v1.10.0
          command: ["/install-cni.sh"]
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d

---

# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
spec:
  # The policy controller can only have a single active instance.
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        k8s-app: calico-policy-controller
      annotations:
        # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
        # reserves resources for critical add-on pods so that they can be rescheduled after
        # a failure. This annotation works in tandem with the toleration below.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      # The policy controller must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
        # This, along with the annotation above marks this pod as a critical add-on.
        - key: CriticalAddonsOnly
          operator: Exists
      serviceAccountName: calico-policy-controller
      containers:
        - name: calico-policy-controller
          image: quay.io/calico/kube-policy-controller:v0.7.0
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The location of the Kubernetes API. Use the default Kubernetes
            # service for API access.
            - name: K8S_API
              value: "https://kubernetes.default:443"
            # Since we're running in the host namespace and might not have KubeDNS
            # access, configure the container's /etc/hosts to resolve
            # kubernetes.default to the correct service clusterIP.
            - name: CONFIGURE_ETC_HOSTS
              value: "true"

---

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-cni-plugin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-cni-plugin
subjects:
  - kind: ServiceAccount
    name: calico-cni-plugin
    namespace: kube-system

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-cni-plugin
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources:
      - pods
      - nodes
    verbs:
      - get

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-cni-plugin
  namespace: kube-system

---

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-policy-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-policy-controller
subjects:
  - kind: ServiceAccount
    name: calico-policy-controller
    namespace: kube-system

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-policy-controller
  namespace: kube-system
rules:
  - apiGroups:
      - ""
      - extensions
    resources:
      - pods
      - namespaces
      - networkpolicies
    verbs:
      - watch
      - list

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-policy-controller
  namespace: kube-system

9 部署Dashboard

# Kubernetes Dashboard v1.7.0 (image mirrored at cloudnil/kubernetes-dashboard-amd64),
# deployed into the "default" namespace and exposed through a Service plus an Ingress.
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: default

---

# NOTE(review): this binds the dashboard service account to cluster-admin,
# which grants full cluster access — acceptable for a lab, risky in production.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: default

---

kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: default
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: cloudnil/kubernetes-dashboard-amd64:v1.7.0
          ports:
            # Dashboard serves plain HTTP on 9090.
            - containerPort: 9090
              protocol: TCP
          args:
          livenessProbe:
            httpGet:
              path: /
              port: 9090
            initialDelaySeconds: 30
            timeoutSeconds: 30
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: default
spec:
  ports:
    # Expose port 80 inside the cluster, forwarding to the container's 9090.
    - port: 80
      targetPort: 9090
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: dashboard-ingress
  namespace: default
spec:
  rules:
    - host: dashboard.cloudnil.com
      http:
        paths:
          - path: /
            backend:
              serviceName: kubernetes-dashboard
              servicePort: 80