Contents
一、KubeSphere Installation
1、Install local persistent storage
1.1、default-storage-class.yaml
1.2、openebs-operator.yaml
1.3、Install the default StorageClass
2、Install KubeSphere
2.1、Install Helm
2.2、Install KubeSphere
二、Configure KubeSphere
1、Install plugins
2、Create a DevOps project
3、Configure SonarQube
3.1、Install the SonarQube server
3.2、Get the SonarQube console address
3.3、Configure the SonarQube server
3.3.1、Create a SonarQube admin token
3.3.2、Create a webhook server
3.3.3、Add the SonarQube server to Jenkins
3.3.4、Configure the DevOps plugin
3.3.5、Configure SonarQube in Jenkins (log in with admin/P@88w0rd if prompted)
3.3.6、Add the SonarQube configuration to DevOps
3.3.7、Add sonarqubeURL to the KubeSphere console
3.3.8、Restart the services
3.4、Configure the private Maven repository
三、Configure the microservice project
1、Create Harbor credentials
2、Build the Maven image
2.1、Download the source
2.2、Configuration files
2.3、Build the image and push it to the private registry
2.4、Modify the KubeSphere configuration
2.5、Create the Docker registry secret
3、Build the DevOps pipeline
3.1、Create credentials
3.2、Create the pipeline
3.3、Edit the Jenkinsfile
3.4、Create harbor-secret
3.5、Verify
四、References
一、KubeSphere Installation
1、Install local persistent storage
1.1、default-storage-class.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: local
annotations:
cas.openebs.io/config: |
- name: StorageType
value: "hostpath"
- name: BasePath
value: "/var/openebs/local/"
openebs.io/cas-type: local
storageclass.beta.kubernetes.io/is-default-class: 'true'
storageclass.kubesphere.io/supported-access-modes: '["ReadWriteOnce"]'
provisioner: openebs.io/local
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
1.2、openebs-operator.yaml
# This manifest deploys the OpenEBS control plane components,
# with associated CRs & RBAC rules
# NOTE: On GKE, deploy the openebs-operator.yaml in admin context
#
# NOTE: The Jiva and cStor components previously included in the Operator File
# are now removed and it is recommended for users to use cStor and Jiva CSI operators.
# To upgrade your Jiva and cStor volumes to CSI, please checkout the documentation at:
# https://github.com/openebs/upgrade
#
# To deploy the legacy Jiva and cStor:
# kubectl apply -f https://openebs.github.io/charts/legacy-openebs-operator.yaml
#
# To deploy cStor CSI:
# kubectl apply -f https://openebs.github.io/charts/cstor-operator.yaml
#
# To deploy Jiva CSI:
# kubectl apply -f https://openebs.github.io/charts/jiva-operator.yaml
#
# Create the OpenEBS namespace
apiVersion: v1
kind: Namespace
metadata:
name: openebs
---
# Create Maya Service Account
apiVersion: v1
kind: ServiceAccount
metadata:
name: openebs-maya-operator
namespace: openebs
---
# Define Role that allows operations on K8s pods/deployments
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: openebs-maya-operator
rules:
- apiGroups: ["*"]
resources: ["nodes", "nodes/proxy"]
verbs: ["*"]
- apiGroups: ["*"]
resources: ["namespaces", "services", "pods", "pods/exec", "deployments", "deployments/finalizers", "replicationcontrollers", "replicasets", "events", "endpoints", "configmaps", "secrets", "jobs", "cronjobs"]
verbs: ["*"]
- apiGroups: ["*"]
resources: ["statefulsets", "daemonsets"]
verbs: ["*"]
- apiGroups: ["*"]
resources: ["resourcequotas", "limitranges"]
verbs: ["list", "watch"]
- apiGroups: ["*"]
resources: ["ingresses", "horizontalpodautoscalers", "verticalpodautoscalers", "certificatesigningrequests"]
verbs: ["list", "watch"]
- apiGroups: ["*"]
resources: ["storageclasses", "persistentvolumeclaims", "persistentvolumes"]
verbs: ["*"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
resources: ["volumesnapshots", "volumesnapshotdatas"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: [ "get", "list", "create", "update", "delete", "patch"]
- apiGroups: ["openebs.io"]
resources: [ "*"]
verbs: ["*" ]
- apiGroups: ["cstor.openebs.io"]
resources: [ "*"]
verbs: ["*" ]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: ["admissionregistration.k8s.io"]
resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"]
verbs: ["get", "create", "list", "delete", "update", "patch"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
- apiGroups: ["*"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "list", "create", "delete", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "create", "update"]
---
# Bind the Service Account with the Role Privileges.
# TODO: Check if default account also needs to be there
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: openebs-maya-operator
subjects:
- kind: ServiceAccount
name: openebs-maya-operator
namespace: openebs
roleRef:
kind: ClusterRole
name: openebs-maya-operator
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.5.0
creationTimestamp: null
name: blockdevices.openebs.io
spec:
group: openebs.io
names:
kind: BlockDevice
listKind: BlockDeviceList
plural: blockdevices
shortNames:
- bd
singular: blockdevice
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.nodeAttributes.nodeName
name: NodeName
type: string
- jsonPath: .spec.path
name: Path
priority: 1
type: string
- jsonPath: .spec.filesystem.fsType
name: FSType
priority: 1
type: string
- jsonPath: .spec.capacity.storage
name: Size
type: string
- jsonPath: .status.claimState
name: ClaimState
type: string
- jsonPath: .status.state
name: Status
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha1
schema:
openAPIV3Schema:
description: BlockDevice is the Schema for the blockdevices API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: DeviceSpec defines the properties and runtime status of a BlockDevice
properties:
aggregateDevice:
description: AggregateDevice was intended to store the hierarchical information in cases of LVM. However this is currently not implemented and may need to be re-looked into for better design. To be deprecated
type: string
capacity:
description: Capacity
properties:
logicalSectorSize:
description: LogicalSectorSize is blockdevice logical-sector size in bytes
format: int32
type: integer
physicalSectorSize:
description: PhysicalSectorSize is blockdevice physical-Sector size in bytes
format: int32
type: integer
storage:
description: Storage is the blockdevice capacity in bytes
format: int64
type: integer
required:
- storage
type: object
claimRef:
description: ClaimRef is the reference to the BDC which has claimed this BD
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
details:
description: Details contain static attributes of BD like model,serial, and so forth
properties:
compliance:
description: Compliance is standards/specifications version implemented by device firmware such as SPC-1, SPC-2, etc
type: string
deviceType:
description: DeviceType represents the type of device like sparse, disk, partition, lvm, crypt
enum:
- disk
- partition
- sparse
- loop
- lvm
- crypt
- dm
- mpath
type: string
driveType:
description: DriveType is the type of backing drive, HDD/SSD
enum:
- HDD
- SSD
- Unknown
- ""
type: string
firmwareRevision:
description: FirmwareRevision is the disk firmware revision
type: string
hardwareSectorSize:
description: HardwareSectorSize is the hardware sector size in bytes
format: int32
type: integer
logicalBlockSize:
description: LogicalBlockSize is the logical block size in bytes reported by /sys/class/block/sda/queue/logical_block_size
format: int32
type: integer
model:
description: Model is model of disk
type: string
physicalBlockSize:
description: PhysicalBlockSize is the physical block size in bytes reported by /sys/class/block/sda/queue/physical_block_size
format: int32
type: integer
serial:
description: Serial is serial number of disk
type: string
vendor:
description: Vendor is vendor of disk
type: string
type: object
devlinks:
description: DevLinks contains soft links of a block device like /dev/by-id/... /dev/by-uuid/...
items:
description: DeviceDevLink holds the mapping between type and links like by-id type or by-path type link
properties:
kind:
description: Kind is the type of link like by-id or by-path.
enum:
- by-id
- by-path
type: string
links:
description: Links are the soft links
items:
type: string
type: array
type: object
type: array
filesystem:
description: FileSystem contains mountpoint and filesystem type
properties:
fsType:
description: Type represents the FileSystem type of the block device
type: string
mountPoint:
description: MountPoint represents the mountpoint of the block device.
type: string
type: object
nodeAttributes:
description: NodeAttributes has the details of the node on which BD is attached
properties:
nodeName:
description: NodeName is the name of the Kubernetes node resource on which the device is attached
type: string
type: object
parentDevice:
description: "ParentDevice was intended to store the UUID of the parent Block Device as is the case for partitioned block devices. \n For example: /dev/sda is the parent for /dev/sda1 To be deprecated"
type: string
partitioned:
description: Partitioned represents if BlockDevice has partitions or not (Yes/No) Currently always default to No. To be deprecated
enum:
- "Yes"
- "No"
type: string
path:
description: Path contain devpath (e.g. /dev/sdb)
type: string
required:
- capacity
- devlinks
- nodeAttributes
- path
type: object
status:
description: DeviceStatus defines the observed state of BlockDevice
properties:
claimState:
description: ClaimState represents the claim state of the block device
enum:
- Claimed
- Unclaimed
- Released
type: string
state:
description: State is the current state of the blockdevice (Active/Inactive/Unknown)
enum:
- Active
- Inactive
- Unknown
type: string
required:
- claimState
- state
type: object
type: object
served: true
storage: true
subresources: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.5.0
creationTimestamp: null
name: blockdeviceclaims.openebs.io
spec:
group: openebs.io
names:
kind: BlockDeviceClaim
listKind: BlockDeviceClaimList
plural: blockdeviceclaims
shortNames:
- bdc
singular: blockdeviceclaim
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.blockDeviceName
name: BlockDeviceName
type: string
- jsonPath: .status.phase
name: Phase
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha1
schema:
openAPIV3Schema:
description: BlockDeviceClaim is the Schema for the blockdeviceclaims API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: DeviceClaimSpec defines the request details for a BlockDevice
properties:
blockDeviceName:
description: BlockDeviceName is the reference to the block-device backing this claim
type: string
blockDeviceNodeAttributes:
description: BlockDeviceNodeAttributes is the attributes on the node from which a BD should be selected for this claim. It can include nodename, failure domain etc.
properties:
hostName:
description: HostName represents the hostname of the Kubernetes node resource where the BD should be present
type: string
nodeName:
description: NodeName represents the name of the Kubernetes node resource where the BD should be present
type: string
type: object
deviceClaimDetails:
description: Details of the device to be claimed
properties:
allowPartition:
description: AllowPartition represents whether to claim a full block device or a device that is a partition
type: boolean
blockVolumeMode:
description: 'BlockVolumeMode represents whether to claim a device in Block mode or Filesystem mode. These are use cases of BlockVolumeMode: 1) Not specified: VolumeMode check will not be effective 2) VolumeModeBlock: BD should not have any filesystem or mountpoint 3) VolumeModeFileSystem: BD should have a filesystem and mountpoint. If DeviceFormat is specified then the format should match with the FSType in BD'
type: string
formatType:
description: Format of the device required, eg:ext4, xfs
type: string
type: object
deviceType:
description: DeviceType represents the type of drive like SSD, HDD etc.,
nullable: true
type: string
hostName:
description: Node name from where blockdevice has to be claimed. To be deprecated. Use NodeAttributes.HostName instead
type: string
resources:
description: Resources will help with placing claims on Capacity, IOPS
properties:
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: 'Requests describes the minimum resources required. eg: if storage resource of 10G is requested minimum capacity of 10G should be available TODO for validating'
type: object
required:
- requests
type: object
selector:
description: Selector is used to find block devices to be considered for claiming
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
properties:
key:
description: key is the label key that the selector applies to.
type: string
operator:
description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
type: object
status:
description: DeviceClaimStatus defines the observed state of BlockDeviceClaim
properties:
phase:
description: Phase represents the current phase of the claim
type: string
required:
- phase
type: object
type: object
served: true
storage: true
subresources: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
# This is the node-disk-manager related config.
# It can be used to customize the disks probes and filters
apiVersion: v1
kind: ConfigMap
metadata:
name: openebs-ndm-config
namespace: openebs
labels:
openebs.io/component-name: ndm-config
data:
  # udev-probe is the default (primary) probe and must be enabled for NDM to run.
  # filterconfigs contains the configs of filters. To provide a group of include
  # and exclude values, add them as a comma-separated string.
node-disk-manager.config: |
probeconfigs:
- key: udev-probe
name: udev probe
state: true
- key: seachest-probe
name: seachest probe
state: false
- key: smart-probe
name: smart probe
state: true
filterconfigs:
- key: os-disk-exclude-filter
name: os disk exclude filter
state: true
exclude: "/,/etc/hosts,/boot"
- key: vendor-filter
name: vendor filter
state: true
include: ""
exclude: "CLOUDBYT,OpenEBS"
- key: path-filter
name: path filter
state: true
include: ""
exclude: "/dev/loop,/dev/fd0,/dev/sr0,/dev/ram,/dev/md,/dev/dm-,/dev/rbd,/dev/zd"
    # metaconfig can be used to decorate the block device with different types of labels
    # that are available on the node or come in as device properties.
    # node labels - the node where the bd is discovered. A whitelist of label prefixes.
# attribute labels - a property of the BD can be added as a ndm label as ndm.io/<property>=<property-value>
metaconfigs:
- key: node-labels
name: node labels
pattern: ""
- key: device-labels
name: device labels
type: ""
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: openebs-ndm
namespace: openebs
labels:
name: openebs-ndm
openebs.io/component-name: ndm
openebs.io/version: 3.5.0
spec:
selector:
matchLabels:
name: openebs-ndm
openebs.io/component-name: ndm
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
name: openebs-ndm
openebs.io/component-name: ndm
openebs.io/version: 3.5.0
spec:
# By default the node-disk-manager will be run on all kubernetes nodes
# If you would like to limit this to only some nodes, say the nodes
# that have storage attached, you could label those node and use
# nodeSelector.
#
# e.g. label the storage nodes with - "openebs.io/nodegroup"="storage-node"
# kubectl label node <node-name> "openebs.io/nodegroup"="storage-node"
#nodeSelector:
# "openebs.io/nodegroup": "storage-node"
serviceAccountName: openebs-maya-operator
hostNetwork: true
# host PID is used to check status of iSCSI Service when the NDM
# API service is enabled
#hostPID: true
containers:
- name: node-disk-manager
image: openebs/node-disk-manager:2.1.0
args:
- -v=4
# The feature-gate is used to enable the new UUID algorithm.
- --feature-gates="GPTBasedUUID"
# Use partition table UUID instead of create single partition to get
# partition UUID. Require `GPTBasedUUID` to be enabled with.
# - --feature-gates="PartitionTableUUID"
# Detect changes to device size, filesystem and mount-points without restart.
# - --feature-gates="ChangeDetection"
# The feature gate is used to start the gRPC API service. The gRPC server
# starts at 9115 port by default. This feature is currently in Alpha state
# - --feature-gates="APIService"
# The feature gate is used to enable NDM, to create blockdevice resources
# for unused partitions on the OS disk
# - --feature-gates="UseOSDisk"
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
volumeMounts:
- name: config
mountPath: /host/node-disk-manager.config
subPath: node-disk-manager.config
readOnly: true
# make udev database available inside container
- name: udev
mountPath: /run/udev
- name: procmount
mountPath: /host/proc
readOnly: true
- name: devmount
mountPath: /dev
- name: basepath
mountPath: /var/openebs/ndm
- name: sparsepath
mountPath: /var/openebs/sparse
env:
# namespace in which NDM is installed will be passed to NDM Daemonset
# as environment variable
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# pass hostname as env variable using downward API to the NDM container
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# specify the directory where the sparse files need to be created.
# if not specified, then sparse files will not be created.
- name: SPARSE_FILE_DIR
value: "/var/openebs/sparse"
# Size(bytes) of the sparse file to be created.
- name: SPARSE_FILE_SIZE
value: "10737418240"
# Specify the number of sparse files to be created
- name: SPARSE_FILE_COUNT
value: "0"
livenessProbe:
exec:
command:
- pgrep
- "ndm"
initialDelaySeconds: 30
periodSeconds: 60
volumes:
- name: config
configMap:
name: openebs-ndm-config
- name: udev
hostPath:
path: /run/udev
type: Directory
# mount /proc (to access mount file of process 1 of host) inside container
# to read mount-point of disks and partitions
- name: procmount
hostPath:
path: /proc
type: Directory
- name: devmount
# the /dev directory is mounted so that we have access to the devices that
# are connected at runtime of the pod.
hostPath:
path: /dev
type: Directory
- name: basepath
hostPath:
path: /var/openebs/ndm
type: DirectoryOrCreate
- name: sparsepath
hostPath:
path: /var/openebs/sparse
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: openebs-ndm-operator
namespace: openebs
labels:
name: openebs-ndm-operator
openebs.io/component-name: ndm-operator
openebs.io/version: 3.5.0
spec:
selector:
matchLabels:
name: openebs-ndm-operator
openebs.io/component-name: ndm-operator
replicas: 1
strategy:
type: Recreate
template:
metadata:
labels:
name: openebs-ndm-operator
openebs.io/component-name: ndm-operator
openebs.io/version: 3.5.0
spec:
serviceAccountName: openebs-maya-operator
containers:
- name: node-disk-operator
image: openebs/node-disk-operator:2.1.0
imagePullPolicy: IfNotPresent
env:
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
# the service account of the ndm-operator pod
- name: SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
- name: OPERATOR_NAME
value: "node-disk-operator"
- name: CLEANUP_JOB_IMAGE
value: "openebs/linux-utils:3.5.0"
# OPENEBS_IO_IMAGE_PULL_SECRETS environment variable is used to pass the image pull secrets
# to the cleanup pod launched by NDM operator
#- name: OPENEBS_IO_IMAGE_PULL_SECRETS
# value: ""
livenessProbe:
httpGet:
path: /healthz
port: 8585
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8585
initialDelaySeconds: 5
periodSeconds: 10
---
# Create NDM cluster exporter deployment.
# This is an optional component and is not required for the basic
# functioning of NDM
apiVersion: apps/v1
kind: Deployment
metadata:
name: openebs-ndm-cluster-exporter
namespace: openebs
labels:
name: openebs-ndm-cluster-exporter
openebs.io/component-name: ndm-cluster-exporter
openebs.io/version: 3.5.0
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
name: openebs-ndm-cluster-exporter
openebs.io/component-name: ndm-cluster-exporter
template:
metadata:
labels:
name: openebs-ndm-cluster-exporter
openebs.io/component-name: ndm-cluster-exporter
openebs.io/version: 3.5.0
spec:
serviceAccountName: openebs-maya-operator
containers:
- name: ndm-cluster-exporter
image: openebs/node-disk-exporter:2.1.0
command:
- /usr/local/bin/exporter
args:
- "start"
- "--mode=cluster"
- "--port=$(METRICS_LISTEN_PORT)"
- "--metrics=/metrics"
ports:
- containerPort: 9100
protocol: TCP
name: metrics
imagePullPolicy: IfNotPresent
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: METRICS_LISTEN_PORT
value: :9100
---
# Create NDM cluster exporter service
# This is optional and required only when
# ndm-cluster-exporter deployment is used
apiVersion: v1
kind: Service
metadata:
name: openebs-ndm-cluster-exporter-service
namespace: openebs
labels:
name: openebs-ndm-cluster-exporter-service
openebs.io/component-name: ndm-cluster-exporter
app: openebs-ndm-exporter
spec:
clusterIP: None
ports:
- name: metrics
port: 9100
targetPort: 9100
selector:
name: openebs-ndm-cluster-exporter
---
# Create NDM node exporter daemonset.
# This is an optional component used for getting disk level
# metrics from each of the storage nodes
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: openebs-ndm-node-exporter
namespace: openebs
labels:
name: openebs-ndm-node-exporter
openebs.io/component-name: ndm-node-exporter
openebs.io/version: 3.5.0
spec:
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
name: openebs-ndm-node-exporter
openebs.io/component-name: ndm-node-exporter
template:
metadata:
labels:
name: openebs-ndm-node-exporter
openebs.io/component-name: ndm-node-exporter
openebs.io/version: 3.5.0
spec:
serviceAccountName: openebs-maya-operator
containers:
- name: node-disk-exporter
image: openebs/node-disk-exporter:2.1.0
command:
- /usr/local/bin/exporter
args:
- "start"
- "--mode=node"
- "--port=$(METRICS_LISTEN_PORT)"
- "--metrics=/metrics"
ports:
- containerPort: 9101
protocol: TCP
name: metrics
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
env:
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: METRICS_LISTEN_PORT
value: :9101
---
# Create NDM node exporter service
# This is optional and required only when
# ndm-node-exporter daemonset is used
apiVersion: v1
kind: Service
metadata:
name: openebs-ndm-node-exporter-service
namespace: openebs
labels:
name: openebs-ndm-node-exporter
openebs.io/component: openebs-ndm-node-exporter
app: openebs-ndm-exporter
spec:
clusterIP: None
ports:
- name: metrics
port: 9101
targetPort: 9101
selector:
name: openebs-ndm-node-exporter
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: openebs-localpv-provisioner
namespace: openebs
labels:
name: openebs-localpv-provisioner
openebs.io/component-name: openebs-localpv-provisioner
openebs.io/version: 3.5.0
spec:
selector:
matchLabels:
name: openebs-localpv-provisioner
openebs.io/component-name: openebs-localpv-provisioner
replicas: 1
strategy:
type: Recreate
template:
metadata:
labels:
name: openebs-localpv-provisioner
openebs.io/component-name: openebs-localpv-provisioner
openebs.io/version: 3.5.0
spec:
serviceAccountName: openebs-maya-operator
containers:
- name: openebs-provisioner-hostpath
imagePullPolicy: IfNotPresent
image: openebs/provisioner-localpv:3.4.0
args:
- "--bd-time-out=$(BDC_BD_BIND_RETRIES)"
env:
# OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s
# based on this address. This is ignored if empty.
# This is supported for openebs provisioner version 0.5.2 onwards
#- name: OPENEBS_IO_K8S_MASTER
# value: "http://10.128.0.12:8080"
# OPENEBS_IO_KUBE_CONFIG enables openebs provisioner to connect to K8s
# based on this config. This is ignored if empty.
# This is supported for openebs provisioner version 0.5.2 onwards
#- name: OPENEBS_IO_KUBE_CONFIG
# value: "/home/ubuntu/.kube/config"
# This sets the number of times the provisioner should try
# with a polling interval of 5 seconds, to get the Blockdevice
# Name from a BlockDeviceClaim, before the BlockDeviceClaim
# is deleted. E.g. 12 * 5 seconds = 60 seconds timeout
- name: BDC_BD_BIND_RETRIES
value: "12"
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: OPENEBS_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# OPENEBS_SERVICE_ACCOUNT provides the service account of this pod as
# environment variable
- name: OPENEBS_SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
- name: OPENEBS_IO_ENABLE_ANALYTICS
value: "true"
- name: OPENEBS_IO_INSTALLER_TYPE
value: "openebs-operator"
- name: OPENEBS_IO_HELPER_IMAGE
value: "openebs/linux-utils:3.5.0"
- name: OPENEBS_IO_BASE_PATH
value: "/var/openebs/local"
# LEADER_ELECTION_ENABLED is used to enable/disable leader election. By default
# leader election is enabled.
#- name: LEADER_ELECTION_ENABLED
# value: "true"
# OPENEBS_IO_IMAGE_PULL_SECRETS environment variable is used to pass the image pull secrets
# to the helper pod launched by local-pv hostpath provisioner
#- name: OPENEBS_IO_IMAGE_PULL_SECRETS
# value: ""
# Process name used for matching is limited to the 15 characters
# present in the pgrep output.
        # So the full name can't be used here with pgrep (>15 chars). A regular expression
        # that matches the entire command name has to be specified.
        # Anchor `^` : matches any string that starts with `provisioner-loc`
        # `.*`: matches any string that has `provisioner-loc` followed by zero or more characters
livenessProbe:
exec:
command:
- sh
- -c
- test `pgrep -c "^provisioner-loc.*"` = 1
initialDelaySeconds: 30
periodSeconds: 60
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: openebs-hostpath
annotations:
openebs.io/cas-type: local
cas.openebs.io/config: |
#hostpath type will create a PV by
# creating a sub-directory under the
# BASEPATH provided below.
- name: StorageType
value: "hostpath"
#Specify the location (directory) where
# where PV(volume) data will be saved.
# A sub-directory with pv-name will be
# created. When the volume is deleted,
# the PV sub-directory will be deleted.
#Default value is /var/openebs/local
- name: BasePath
value: "/var/openebs/local/"
provisioner: openebs.io/local
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: openebs-device
annotations:
openebs.io/cas-type: local
cas.openebs.io/config: |
#device type will create a PV by
# issuing a BDC and will extract the path
# values from the associated BD.
- name: StorageType
value: "device"
provisioner: openebs.io/local
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
1.3、Install the default StorageClass
# Install the iSCSI client (OpenEBS relies on it for storage support); run on every node
yum install iscsi-initiator-utils -y
# Enable the service at boot
systemctl enable --now iscsid
# Start the service
systemctl start iscsid
# Check the service status
systemctl status iscsid
# Install OpenEBS
kubectl apply -f https://openebs.github.io/charts/openebs-operator.yaml
# Check the status (pulling the images may take some time)
kubectl get all -n openebs
# Create the local storage class on the master node
kubectl apply -f default-storage-class.yaml
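Before moving on, you can confirm that the local StorageClass is registered as the default and that the OpenEBS pods are healthy (a quick sanity check; exact output varies by version):
# The "local" StorageClass should be marked (default)
kubectl get sc
# All pods in the openebs namespace should reach Running
kubectl get pods -n openebs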
2、Install KubeSphere
2.1、Install Helm
Follow the official guide (Helm | Installing Helm), and note the compatibility between your Kubernetes version and the Helm version.
2.2、Install KubeSphere
# Assuming Helm 3 is already installed
helm repo add kubesphere https://charts.kubesphere.io/main
# Search for the chart
helm search repo kubesphere
# Pull the chart
helm pull kubesphere/ks-core --version=1.1.3
# Unpack it
tar -xf ks-core-1.1.3.tgz
# Create the namespaces
kubectl create ns kubesphere-system
kubectl create ns kubesphere-controls-system
kubectl create ns kubesphere-monitoring-system
# Install
helm install ks-core ks-core -n kubesphere-system
# Console address
http://192.168.139.176:30880
# Default account and password
Account: admin
Password: P@88w0rd
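Before opening the console, it may help to confirm that ks-core came up cleanly (a minimal check; pod names vary by release):
# All pods in kubesphere-system should reach Running
kubectl get pods -n kubesphere-system
# The console Service should expose NodePort 30880
kubectl get svc -n kubesphere-system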
二、Configure KubeSphere
1、Install plugins
Install the DevOps extension from the KubeSphere extension center.
2、Create a DevOps project
Go to Workbench > Workspace > Create.
3、Configure SonarQube
3.1、Install the SonarQube server
helm upgrade --install sonarqube sonarqube --repo https://charts.kubesphere.io/main \
  -n kubesphere-devops-system --create-namespace --set service.type=NodePort
3.2、Get the SonarQube console address
export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services sonarqube-sonarqube)
export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
# Check whether all the resources have been created
kubectl get pod -n kubesphere-devops-system
Open the SonarQube console; the default account/password is admin/admin.
3.3、Configure the SonarQube server
3.3.1、Create a SonarQube admin token
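The token can be created in the SonarQube UI under My Account > Security > Generate Tokens, or through the Web API. A sketch of the API route, assuming the NODE_IP/NODE_PORT variables from section 3.2 still point at SonarQube and the admin password is unchanged:
# Generate an admin token named "kubesphere"; the value is printed only once, so save it
curl -u admin:admin -X POST "http://$NODE_IP:$NODE_PORT/api/user_tokens/generate" -d "name=kubesphere"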
3.3.2、Create a webhook server
export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services devops-jenkins)
export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT/sonarqube-webhook/
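The webhook itself is registered on the SonarQube side (Administration > Configuration > Webhooks), pointing at the address echoed above. It can also be created through the Web API; a sketch, where SONARQUBE_URL (an assumed variable) is the console address from section 3.2 and SONAR_TOKEN is the token from 3.3.1:
# Register the Jenkins endpoint as a global SonarQube webhook
curl -u "$SONAR_TOKEN:" -X POST "$SONARQUBE_URL/api/webhooks/create" -d "name=jenkins" -d "url=http://$NODE_IP:$NODE_PORT/sonarqube-webhook/"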
3.3.3、Add the SonarQube server to Jenkins
export NODE_PORT=$(kubectl get --namespace kubesphere-devops-system -o jsonpath="{.spec.ports[0].nodePort}" services devops-jenkins)
export NODE_IP=$(kubectl get nodes --namespace kubesphere-devops-system -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
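As an optional check that Jenkins answers at that address before configuring it:
# Expect an HTTP 200 response from the Jenkins login page
curl -sI http://$NODE_IP:$NODE_PORT/login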
3.3.4、Configure the DevOps plugin
Step 1: change the configured address to the actual address.
Step 2: go to System Workspace > Projects > kubesphere-devops-system > Configuration > jenkins-casc-config > jenkins_user.yaml.
Step 3: change the following entries to addresses that are actually reachable:
securityRealm:
oic:
clientId: "jenkins"
clientSecret: "jenkins"
tokenServerUrl: "http://192.168.139.176:30880/oauth/token"
authorizationServerUrl: "http://192.168.139.176:30880/oauth/authorize"
userInfoServerUrl: "http://192.168.139.176:30880/oauth/userinfo"
endSessionEndpoint: "http://192.168.139.176:30880/oauth/logout"
logoutFromOpenidProvider: true
scopes: openid profile email
fullNameFieldName: url
userNameField: preferred_username
Step 4: go to System Workspace > project kubesphere-system > ConfigMap kubesphere-config.
Step 5: restart the ks-apiserver Deployment:
kubectl -n kubesphere-system rollout restart deploy ks-apiserver
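The restart can be watched until it completes:
kubectl -n kubesphere-system rollout status deploy ks-apiserver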
3.3.5、Configure SonarQube in Jenkins (log in with admin/P@88w0rd if prompted)
Add a credential for the SonarQube configuration to use.
3.3.6、Add the SonarQube configuration to DevOps
1. Run the following command:
kubectl -n kubesphere-devops-system edit cm devops-config
2. Edit the configuration:
data:
kubesphere.yaml: |
authentication:
authenticateRateLimiterMaxTries: 10
authenticateRateLimiterDuration: 10m0s
loginHistoryRetentionPeriod: 168h
maximumClockSkew: 10s
jwtSecret: "UDjssmmDgxZtkXVDSeFvBtsZeBSFWhJ6"
devops:
host: http://devops-jenkins.kubesphere-devops-system
username: admin
maxConnections: 100
namespace: kubesphere-devops-system
workerNamespace: kubesphere-devops-worker
sonarqube:
host: http://192.168.139.176:31850
token: deafc2f1c17bf0d6bbeccb2a742a1706bebc0c5a
3. Save and exit.
3.3.7、Add sonarqubeURL to the KubeSphere console
kubectl edit cm -n kubesphere-system ks-console-config
data:
local_config.yaml: |
server:
http:
hostname: localhost
port: 8000
static:
production:
/public: server/public
/assets: dist/assets
/dist: dist
redis:
port: 6379
host: redis.kubesphere-system.svc
redisTimeout: 5000
sessionTimeout: 7200000
apiServer:
url: http://ks-apiserver
wsUrl: ws://ks-apiserver
client:
version:
kubesphere: v4.1.2
kubernetes: v1.28.2
enableKubeConfig: true
      devops: # added
        sonarqubeURL: http://192.168.139.176:31850 # added
enableNodeListTerminal: true
3.3.8、Restart the services
kubectl -n kubesphere-devops-system rollout restart deploy devops-apiserver
kubectl -n kubesphere-system rollout restart deploy ks-console
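To confirm both deployments have rolled out:
kubectl -n kubesphere-devops-system rollout status deploy devops-apiserver
kubectl -n kubesphere-system rollout status deploy ks-console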
3.4、Configure the private Maven repository
Go to Cluster Management > host cluster > Configuration > ConfigMaps > ks-devops-agent.
kind: ConfigMap
apiVersion: v1
metadata:
name: ks-devops-agent
namespace: kubesphere-devops-worker
labels:
app.kubernetes.io/managed-by: Helm
kubesphere.io/extension-ref: devops
annotations:
meta.helm.sh/release-name: devops-agent
meta.helm.sh/release-namespace: kubesphere-devops-system
data:
MavenSetting: |
<?xml version="1.0" encoding="UTF-8"?>
<settings
xmlns="http://maven.apache.org/SETTINGS/1.2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.2.0 https://maven.apache.org/xsd/settings-1.2.0.xsd">
<localRepository>/var/jenkins_home/repository</localRepository>
<servers>
<server>
<id>release</id>
<username>admin</username>
<password>123456</password>
</server>
<server>
<id>snapshots</id>
<username>admin</username>
<password>123456</password>
</server>
<server>
<id>snail</id>
<username>admin</username>
<password>123456</password>
</server>
</servers>
<mirrors>
<mirror>
<id>snail</id>
<name>snail</name>
<url>http://192.168.139.184:8081/repository/snail-group/</url>
<mirrorOf>*</mirrorOf>
</mirror>
</mirrors>
<pluginGroups>
<pluginGroup>org.sonarsource.scanner.maven</pluginGroup>
</pluginGroups>
<profiles>
<profile>
<id>dev</id>
<repositories>
<repository>
<id>nexus</id>
<url>http://192.168.139.184:8081/repository/snail-group/</url>
<releases>
<enabled>true</enabled>
</releases>
<snapshots>
<enabled>true</enabled>
</snapshots>
</repository>
</repositories>
<pluginRepositories>
<pluginRepository>
<id>public</id>
<name>Public Repositories</name>
<url>http://192.168.139.184:8081/repository/snail-group/</url>
</pluginRepository>
</pluginRepositories>
</profile>
<profile>
<id>jdk-17</id>
<activation>
<activeByDefault>true</activeByDefault>
<jdk>17</jdk>
</activation>
<properties>
<sonar.host.url>http://192.168.139.176:30335</sonar.host.url>
</properties>
</profile>
</profiles>
<activeProfiles>
<activeProfile>dev</activeProfile>
</activeProfiles>
</settings>
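Once a pipeline pod is running, the mounted settings can be verified from inside its maven container; mvn help:effective-settings prints the merged configuration and should show the mirror and servers defined above (an optional check, not required for the setup):
# Run inside the maven container of a pipeline pod
mvn help:effective-settings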
三、Configure the microservice project
1、Create Harbor credentials
Go to Cluster Management > Configuration > Secrets > Create.
2、Build the Maven image
KubeSphere's bundled Maven is version 3.5.3, which is too old, so build your own Docker image with a newer Maven.
2.1、Download the source
https://github.com/carlossg/docker-maven/tree/main/eclipse-temurin-17
2.2、Configuration files
Dockerfile
FROM eclipse-temurin:17-jdk as builder
ARG MAVEN_VERSION=3.9.9
ARG USER_HOME_DIR="/root"
ARG SHA=a555254d6b53d267965a3404ecb14e53c3827c09c3b94b5678835887ab404556bfaf78dcfe03ba76fa2508649dca8531c74bca4d5846513522404d48e8c4ac8b
ARG BASE_URL=https://dlcdn.apache.org/maven/maven-3/${MAVEN_VERSION}/binaries
ENV MAVEN_HOME=/usr/share/maven
ENV MAVEN_CONFIG="$USER_HOME_DIR/.m2"
RUN apt-get update \
&& apt-get install -y ca-certificates curl git gnupg dirmngr --no-install-recommends \
&& rm -rf /var/lib/apt/lists/*
RUN set -eux; curl -fsSLO --retry 3 --retry-connrefused --compressed ${BASE_URL}/apache-maven-${MAVEN_VERSION}-bin.tar.gz \
&& echo "${SHA} *apache-maven-${MAVEN_VERSION}-bin.tar.gz" | sha512sum -c - \
&& curl -fsSLO --compressed ${BASE_URL}/apache-maven-${MAVEN_VERSION}-bin.tar.gz.asc \
&& export GNUPGHOME="$(mktemp -d)"; \
for key in \
6A814B1F869C2BBEAB7CB7271A2A1C94BDE89688 \
29BEA2A645F2D6CED7FB12E02B172E3E156466E8 \
88BE34F94BDB2B5357044E2E3A387D43964143E3 \
; do \
gpg --batch --keyserver hkps://keyserver.ubuntu.com --recv-keys "$key" ; \
done; \
gpg --batch --verify apache-maven-${MAVEN_VERSION}-bin.tar.gz.asc apache-maven-${MAVEN_VERSION}-bin.tar.gz
RUN mkdir -p ${MAVEN_HOME} ${MAVEN_HOME}/ref \
&& tar -xzf apache-maven-${MAVEN_VERSION}-bin.tar.gz -C ${MAVEN_HOME} --strip-components=1 \
&& ln -s ${MAVEN_HOME}/bin/mvn /usr/bin/mvn
# smoke test
RUN mvn --version
FROM eclipse-temurin:17-jdk
RUN apt-get update \
&& apt-get install -y ca-certificates curl git openssh-client --no-install-recommends \
&& rm -rf /var/lib/apt/lists/*
LABEL org.opencontainers.image.title="Apache Maven"
LABEL org.opencontainers.image.source=https://github.com/carlossg/docker-maven
LABEL org.opencontainers.image.url=https://github.com/carlossg/docker-maven
LABEL org.opencontainers.image.description="Apache Maven is a software project management and comprehension tool. Based on the concept of a project object model (POM), Maven can manage a project's build, reporting and documentation from a central piece of information."
ENV MAVEN_HOME=/usr/share/maven
COPY --from=builder ${MAVEN_HOME} ${MAVEN_HOME}
COPY mvn-entrypoint.sh /usr/local/bin/mvn-entrypoint.sh
COPY settings-docker.xml /usr/share/maven/ref/
RUN ln -s ${MAVEN_HOME}/bin/mvn /usr/bin/mvn
ARG MAVEN_VERSION=3.9.9
ARG USER_HOME_DIR="/root"
ENV MAVEN_CONFIG="$USER_HOME_DIR/.m2"
ENTRYPOINT ["/usr/local/bin/mvn-entrypoint.sh"]
CMD ["mvn"]
mvn-entrypoint.sh
#! /bin/sh -eu
# Copy files from /usr/share/maven/ref into ${MAVEN_CONFIG}
# So the initial ~/.m2 is set with expected content.
# Don't override, as this is just a reference setup
copy_reference_files() {
local log="$MAVEN_CONFIG/copy_reference_file.log"
local ref="/usr/share/maven/ref"
if mkdir -p "${MAVEN_CONFIG}/repository" && touch "${log}" > /dev/null 2>&1 ; then
cd "${ref}"
local reflink=""
if cp --help 2>&1 | grep -q reflink ; then
reflink="--reflink=auto"
fi
if [ -n "$(find "${MAVEN_CONFIG}/repository" -maxdepth 0 -type d -empty 2>/dev/null)" ] ; then
# destination is empty...
echo "--- Copying all files to ${MAVEN_CONFIG} at $(date)" >> "${log}"
cp -rv ${reflink} . "${MAVEN_CONFIG}" >> "${log}"
else
# destination is non-empty, copy file-by-file
echo "--- Copying individual files to ${MAVEN_CONFIG} at $(date)" >> "${log}"
find . -type f -exec sh -eu -c '
log="${1}"
shift
reflink="${1}"
shift
for f in "$@" ; do
if [ ! -e "${MAVEN_CONFIG}/${f}" ] || [ -e "${f}.override" ] ; then
mkdir -p "${MAVEN_CONFIG}/$(dirname "${f}")"
cp -rv ${reflink} "${f}" "${MAVEN_CONFIG}/${f}" >> "${log}"
fi
done
' _ "${log}" "${reflink}" {} +
fi
echo >> "${log}"
else
echo "Can not write to ${log}. Wrong volume permissions? Carrying on ..."
fi
}
owd="$(pwd)"
copy_reference_files
unset MAVEN_CONFIG
cd "${owd}"
unset owd
exec "$@"
settings-docker.xml
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
https://maven.apache.org/xsd/settings-1.0.0.xsd">
<localRepository>/usr/share/maven/ref/repository</localRepository>
</settings>
2.3、Build the image and push it to the private registry
# Build the image
docker build -t 192.168.139.184:8899/library/maven:3.9.9-jdk17 .
# Log in to the private registry
docker login -uadmin 192.168.139.184:8899
# Push it to the registry
docker push 192.168.139.184:8899/library/maven:3.9.9-jdk17
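If the login or push fails because the registry is served over plain HTTP, the Docker daemon on each node that talks to Harbor may need the registry listed as insecure first (an assumption about this Harbor setup; skip it if TLS is configured):
# Add to /etc/docker/daemon.json (merge with any existing settings):
# { "insecure-registries": ["192.168.139.184:8899"] }
# Then restart the daemon
systemctl restart docker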
2.4、Modify the KubeSphere configuration
Go to Cluster Management > host cluster > Configuration > ConfigMaps > jenkins-casc-config and add a JDK 17 pod template alongside the existing mavenjdk11 one.
Edit jenkins_user.yaml:
- name: "mavenjdk17"
label: "mavenjdk17"
inheritFrom: "maven"
imagePullSecrets:
- name: harbor-secret
containers:
- name: "maven"
image: "192.168.139.184:8899/library/maven:3.9.9-jdk17"
volumes:
- hostPathVolume:
hostPath: "/var/run/docker.sock"
mountPath: "/var/run/docker.sock"
- hostPathVolume:
hostPath: "/var/data/jenkins_maven_cache"
mountPath: "/root/.m2"
- hostPathVolume:
hostPath: "/var/data/jenkins_sonar_cache"
mountPath: "/root/.sonar/cache"
- hostPathVolume:
hostPath: "/usr/bin/docker"
mountPath: "/usr/bin/docker"
- hostPathVolume:
hostPath: "/usr/bin/kubectl"
mountPath: "/usr/bin/kubectl"
- hostPathVolume:
hostPath: "/usr/bin/envsubst"
mountPath: "/usr/bin/envsubst"
yaml: |
spec:
containers:
- name: "maven"
volumeMounts:
- name: config-volume
mountPath: /usr/share/maven/conf/settings.xml
subPath: settings.xml
volumes:
- name: config-volume
configMap:
name: ks-devops-agent
items:
- key: MavenSetting
path: settings.xml
2.5、Create the Docker registry secret
# The namespace here is kubesphere-devops-worker
kubectl create secret docker-registry harbor-secret --docker-server=192.168.139.184:8899 --docker-username=admin --docker-password=Harbor12345 -n kubesphere-devops-worker
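A quick check that the secret landed in the expected namespace:
kubectl get secret harbor-secret -n kubesphere-devops-worker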
3、Build the DevOps pipeline
3.1、Create credentials
Go to workspace wssnail-shop > DevOps project ks-wssnail-shop-dev > DevOps Project Settings > Credentials.
3.2、Create the pipeline
3.3、Edit the Jenkinsfile
pipeline {
agent {
node {
label 'mavenjdk17' // must match the label of the custom Maven pod template added in section 2.4
}
}
stages {
stage('checkout scm') {
agent none
steps {
git(url: 'http://192.168.139.184:9000/shop/wssnail-shop.git', credentialsId: 'git-user-pwd', branch: '$BRANCH', changelog: true, poll: false)
}
}
stage('unit test') {
agent none
steps {
container('maven') {
sh '''cd ${SERVICE}
pwd
echo "${SERVICE}"
mvn clean test'''
}
}
}
stage('Code Analysis') {
agent none
steps {
container('maven') {
withCredentials([string(credentialsId: 'sonar-token', variable: 'SONAR_TOKEN')]) {
withSonarQubeEnv('sonar') {
sh '''service_name=${SERVICE#*/}
service_name=${service_name#*/}
cd ${SERVICE}
mvn sonar:sonar -Dsonar.projectKey=${service_name} -Dsonar.login=$SONAR_TOKEN
echo "mvn sonar:sonar -Dsonar.projectKey=${service_name}"'''
}
}
timeout(unit: 'MINUTES', activity: true, time: 15) {
waitForQualityGate abortPipeline: true
}
}
}
}
stage('build & push') {
agent none
steps {
withCredentials([usernamePassword(credentialsId: 'harbor-user-pwd', passwordVariable: 'DOCKER_PASSWORD', usernameVariable: 'DOCKER_USERNAME')]) {
container('maven') {
sh '''cd ${SERVICE}
mvn clean package -DskipTests
cd ${WORKSPACE}
chmod -R 777 deploy/copy.sh && deploy/copy.sh'''
sh '''echo "${DOCKER_PASSWORD}" | docker login ${REGISTRY} -u "${DOCKER_USERNAME}" --password-stdin
service_name=${SERVICE#*/}
service_name=${service_name#*/}
cd deploy/${service_name}/build
if test "\${DOCKERHUB_NAMESPACE}" = "\${DOCKERHUB_NAMESPACE_SNAPSHOT}"; then
echo "DOCKERHUB_NAMESPACE is snapshot...."
docker build -f Dockerfile -t \${REGISTRY}/\${DOCKERHUB_NAMESPACE}/\${service_name}:SNAPSHOT-\$BUILD_NUMBER .
docker push \${REGISTRY}/\${DOCKERHUB_NAMESPACE}/\${service_name}:SNAPSHOT-\${BUILD_NUMBER}
else
docker build -f Dockerfile -t \${REGISTRY}/\${DOCKERHUB_NAMESPACE}/\${service_name}:SNAPSHOT-\$BUILD_NUMBER .
echo "DOCKERHUB_NAMESPACE is release...."
fi'''
}
}
}
}
stage('push latest') {
steps {
container('maven') {
sh '''service_name=${SERVICE#*/}
service_name=${service_name#*/}
cd deploy/${service_name}/build
docker tag ${REGISTRY}/${DOCKERHUB_NAMESPACE}/${service_name}:SNAPSHOT-${BUILD_NUMBER} ${REGISTRY}/${DOCKERHUB_NAMESPACE}/${service_name}:latest
docker push ${REGISTRY}/${DOCKERHUB_NAMESPACE}/${service_name}:latest'''
}
}
}
stage('deploy to dev') {
agent none
when {
expression {
return params.TAG_NAME =~ /snapshot.*/
}
}
steps {
input(message: 'deploy to dev?', submitter: '')
container('maven') {
withCredentials([kubeconfigContent(credentialsId: 'kubeconfig-id', variable: 'ADMIN_KUBECONFIG')]) {
sh '''service_name=${SERVICE#*/}
service_name=${service_name#*/}
cd deploy/${service_name}
sed -i\'\' "s#REGISTRY#${REGISTRY}#" deployment.yaml
sed -i\'\' "s#DOCKERHUB_NAMESPACE#${DOCKERHUB_NAMESPACE}#" deployment.yaml
sed -i\'\' "s#APP_NAME#${service_name}#" deployment.yaml
sed -i\'\' "s#BUILD_NUMBER#${BUILD_NUMBER}#" deployment.yaml
sed -i\'\' "s#REPLICAS#${REPLICAS}#" deployment.yaml
mkdir -p ~/.kube
echo "$ADMIN_KUBECONFIG" > ~/.kube/config
kubectl create cm ${service_name}-yml --dry-run=\'client\' -o yaml --from-file=build/target/bootstrap.yml -n prod-wssnail-shopf9vqj > ${service_name}-configmap.yml
kubectl apply -f .'''
}
}
}
}
stage('push with tag') {
agent none
when {
expression {
return params.TAG_NAME =~ /v.*/
}
}
steps {
input(message: 'release image with tag?', submitter: '')
withCredentials([usernamePassword(credentialsId: 'git-user-pwd', passwordVariable: 'GIT_PASSWORD', usernameVariable: 'GIT_USERNAME')]) {
sh 'git config --global user.email "snail"'
sh 'git config --global user.name "snail"'
sh 'git tag -a ${TAG_NAME} -m "${TAG_NAME}"'
sh 'git push http://${GIT_USERNAME}:${GIT_PASSWORD}@${GIT_REPO_URL}/${GIT_ACCOUNT}/${APP_NAME}.git --tags --ipv4'
container('maven') {
sh '''service_name=${SERVICE#*/}
service_name=${service_name#*/}
docker tag ${REGISTRY}/${DOCKERHUB_NAMESPACE}/${service_name}:SNAPSHOT-${BUILD_NUMBER} ${REGISTRY}/${DOCKERHUB_NAMESPACE}/${service_name}:${TAG_NAME}
docker push ${REGISTRY}/${DOCKERHUB_NAMESPACE}/${service_name}:${TAG_NAME}'''
}
}
}
}
stage('deploy to production') {
agent none
when {
expression {
return params.TAG_NAME =~ /v.*/
}
}
steps {
input(message: 'deploy-to-production?', submitter: '')
container('maven') {
withCredentials([kubeconfigContent(credentialsId: 'kubeconfig-id', variable: 'ADMIN_KUBECONFIG')]) {
sh '''service_name=${SERVICE#*/}
service_name=${service_name#*/}
cd deploy/${service_name}/prod
sed -i\'\' "s#REGISTRY#${REGISTRY}#" deployment.yaml
sed -i\'\' "s#DOCKERHUB_NAMESPACE#${DOCKERHUB_NAMESPACE}#" deployment.yaml
sed -i\'\' "s#APP_NAME#${service_name}#" deployment.yaml
sed -i\'\' "s#TAG_NAME#${TAG_NAME}#" deployment.yaml
sed -i\'\' "s#REPLICAS#${REPLICAS}#" deployment.yaml
mkdir -p ~/.kube
echo "$ADMIN_KUBECONFIG" > ~/.kube/config
kubectl create cm ${service_name}-yml --dry-run=\'client\' -o yaml --from-file=../build/target/bootstrap.yml -n prod-wssnail-shopf9vqj > ${service_name}-configmap.yml
kubectl apply -f .'''
}
}
}
}
}
environment {
APP_NAME = 'wssnail-shop'
DOCKER_CREDENTIAL_ID = 'harbor-user-pwd'
REGISTRY = '192.168.139.184:8899'
GIT_REPO_URL = '192.168.139.184:9000'
GIT_CREDENTIAL_ID = 'git-user-pwd'
GIT_ACCOUNT = 'shop'
SONAR_CREDENTIAL_ID = 'sonar-token'
DOCKERHUB_NAMESPACE_SNAPSHOT = 'snapshot'
DOCKERHUB_NAMESPACE_RELEASE = 'release'
}
parameters {
choice(name: 'SERVICE', choices: ['wssnail-shop-parent/shop-gateway','wssnail-shop-parent/shop-uaa','wssnail-shop-parent/shop-commodity','wssnail-shop-parent/shop-order'], description: 'Select the service to deploy')
choice(name: 'DOCKERHUB_NAMESPACE', choices: ['snapshot', 'release'], description: 'Select the image repository to push to')
choice(name: 'REPLICAS', choices: ['1', '3', '5', '7'], description: 'Select the number of replicas to deploy')
string(name: 'BRANCH', defaultValue: 'master', description: 'Enter the branch to build')
string(name: 'TAG_NAME', defaultValue: 'snapshot', description: 'Release version: must start with v, e.g. v1, v1.0.0')
}
}
3.4、Create harbor-secret
kubectl create secret docker-registry harbor-secret --docker-server=192.168.139.184:8899 --docker-username=admin --docker-password=Harbor12345 -n prod-wssnail-shopf9vqj
3.5、Verify
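After a successful run, the deployed workloads can be inspected with kubectl (a minimal sketch, assuming the namespace used in the Jenkinsfile above):
# Deployments and pods created by the pipeline
kubectl get deploy,pods -n prod-wssnail-shopf9vqj
# The images pushed by the pipeline should also appear in Harbor under the snapshot/release projects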
四、References
https://blog.csdn.net/huangh0914/article/details/136363139
KubeSphere documentation center