Commit 4224f4b3 authored by Mohamad Bashar Desoki

Deploy Services (ms-demo, prometheus, grafana)

parent bf151de2
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
- kind: ServiceAccount
  name: prometheus
  namespace: monitoring
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: prometheus
rules:
- apiGroups: [""]
  resources:
  - nodes
  - services
  - endpoints
  - pods
  verbs: ["get", "list", "watch"]
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs: ["get", "list", "watch"]
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-config
  namespace: monitoring
data:
  prometheus.yml: |
    global:
      scrape_interval: 15s
      evaluation_interval: 15s
    alerting:
      alertmanagers:
      - static_configs:
        - targets:
    rule_files:
    # - "example-file.yml"
    scrape_configs:
    # MS-Demo App
    - job_name: 'ms-demo-service'
      # Override the global default and scrape targets from this job every 5 seconds.
      scrape_interval: 5s
      metrics_path: /actuator/prometheus
      static_configs:
      # Use the Kubernetes service name (see app-deployment.yaml) instead of localhost
      - targets: ['ms-demo-service.default.svc:8080']
    - job_name: 'kubelet'
      kubernetes_sd_configs:
      - role: node
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        insecure_skip_verify: true
    - job_name: 'cadvisor'
      kubernetes_sd_configs:
      - role: node
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        insecure_skip_verify: true # Required with Minikube.
      metrics_path: /metrics/cadvisor
    - job_name: 'k8services'
      kubernetes_sd_configs:
      - role: endpoints
      relabel_configs:
      - source_labels:
        - __meta_kubernetes_namespace
        - __meta_kubernetes_service_name
        action: drop
        regex: default;kubernetes
      - source_labels:
        - __meta_kubernetes_namespace
        regex: default
        action: keep
      - source_labels: [__meta_kubernetes_service_name]
        target_label: job
    - job_name: 'k8pods'
      kubernetes_sd_configs:
      - role: pod
      relabel_configs:
      - source_labels: [__meta_kubernetes_pod_container_port_name]
        regex: metrics
        action: keep
      - source_labels: [__meta_kubernetes_pod_container_name]
        target_label: job
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus
  namespace: monitoring
  labels:
    app: prometheus
spec:
  replicas: 1
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  selector:
    matchLabels:
      app: prometheus
  template:
    metadata:
      labels:
        app: prometheus
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "9090"
    spec:
      # Run under the ServiceAccount bound to the prometheus ClusterRole above,
      # so the kubernetes_sd_configs jobs can list nodes, pods, and endpoints.
      serviceAccountName: prometheus
      containers:
      - name: prometheus
        image: prom/prometheus
        args:
        - '--storage.tsdb.retention=6h'
        - '--storage.tsdb.path=/prometheus'
        - '--config.file=/etc/prometheus/prometheus.yml'
        ports:
        - name: web
          containerPort: 9090
        volumeMounts:
        - name: prometheus-config-volume
          mountPath: /etc/prometheus
        - name: prometheus-storage-volume
          mountPath: /prometheus
      restartPolicy: Always
      volumes:
      - name: prometheus-config-volume
        configMap:
          defaultMode: 420
          name: prometheus-config
      - name: prometheus-storage-volume
        persistentVolumeClaim:
          claimName: pvc-nfs-data
---
apiVersion: v1
kind: PersistentVolume
metadata:
  # PersistentVolumes are cluster-scoped, so no namespace is set here.
  name: pv-nfs-data
  labels:
    type: nfs
    app: prometheus-deployment
spec:
  storageClassName: managed-nfs
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteMany
  nfs:
    server: 10.10.10.80
    path: "/mnt/nfs/promdata"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-nfs-data
  namespace: monitoring
  labels:
    app: prometheus-deployment
spec:
  storageClassName: managed-nfs
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 500Mi
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
  namespace: monitoring
---
apiVersion: v1
kind: Service
metadata:
  name: prometheus-service
  namespace: monitoring
  annotations:
    prometheus.io/scrape: 'true'
    prometheus.io/port: '9090'
spec:
  selector:
    app: prometheus
  type: NodePort
  ports:
  - port: 8080
    targetPort: 9090
    nodePort: 30909
## Deploy Demo Service Application (Kubernetes Cluster)
##### Step 1
Before applying the manifests, build the service's Docker image and push it to a remote repository:
```sh
docker build -t ms-demo .
docker tag ms-demo mhdbashar/ms-demo
docker push mhdbashar/ms-demo
```
##### Step 2
Deploy Prometheus for monitoring into the cluster:
```sh
kubectl apply -f Prometheus/
```
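A quick way to verify the monitoring stack (a sketch, assuming the manifests above were applied unchanged; `<node-ip>` is a placeholder for one of your nodes):
```sh
kubectl get pods -n monitoring                    # the prometheus pod should reach Running
kubectl get svc prometheus-service -n monitoring  # exposed on NodePort 30909
curl http://<node-ip>:30909/-/healthy             # Prometheus health endpoint
```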
##### Step 3
Deploy the service to the cluster with horizontal pod autoscaling:
```sh
kubectl apply -f app-deployment.yaml
kubectl apply -f hpa.yaml
```
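To watch the rollout and the autoscaler (names taken from `app-deployment.yaml` and `hpa.yaml` below; stop `--watch` with Ctrl-C):
```sh
kubectl get deployment ms-demo
kubectl get svc ms-demo-service      # exposed on NodePort 30999
kubectl get hpa ms-demo-hpa --watch  # shows CPU/memory utilization against the 50% targets
```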
apiVersion: v1 # Kubernetes API version
kind: Service # Kubernetes resource kind we are creating
metadata: # Metadata of the resource kind we are creating
  name: ms-demo-service
spec:
  selector:
    app: ms-demo
  ports:
  - protocol: "TCP"
    port: 8080 # The port that the service is exposed on in the cluster
    targetPort: 8080 # The port the container listens on
    nodePort: 30999
  type: LoadBalancer # Type of the service.
---
apiVersion: apps/v1
kind: Deployment # Kubernetes resource kind we are creating
metadata:
  name: ms-demo
spec:
  selector:
    matchLabels:
      app: ms-demo
  replicas: 2 # Number of replicas that will be created for this deployment
  template:
    metadata:
      labels:
        app: ms-demo
    spec:
      containers:
      - name: ms-demo
        image: mhdbashar/ms-demo # Image used for the containers in the cluster
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080 # The port the container listens on in the cluster
        resources: # Important for the HPA: utilization is computed against these requests.
          limits:
            cpu: "2"
            memory: 2Gi
          requests:
            cpu: 500m
            memory: 1Gi
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  labels:
    app: ms-demo
  name: ms-demo-hpa
spec:
  maxReplicas: 10
  minReplicas: 2
  metrics:
  - resource:
      name: memory
      target:
        averageUtilization: 50
        type: Utilization
    type: Resource
  - resource:
      name: cpu
      target:
        averageUtilization: 50
        type: Utilization
    type: Resource
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: ms-demo

# kubectl autoscale deployment ms-demo --cpu-percent=50 --min=1 --max=10   # another way to create an HPA
# kubectl get hpa ms-demo-hpa --watch                                      # monitor the HPA
# kubectl scale --replicas=2 deployment.apps/ms-demo                       # scale manually
## Deploy Demo Service Application (Docker Desktop)
### Docker Desktop (Docker Compose)
Build the Docker image and deploy the monitoring services (Prometheus and Grafana) with Docker Compose:
```sh
docker build -t ms-demo .
docker-compose -f docker-compose-desktop.yml up
```
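Once the stack is up, both UIs are published on the host ports from `docker-compose-desktop.yml`; a minimal smoke test, assuming default settings:
```sh
curl http://localhost:9090/-/healthy   # Prometheus
curl http://localhost:3000/api/health  # Grafana
```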
### Docker Desktop Kubernetes
##### Step 1
Build the Docker image
```sh
docker build -t ms-demo .
```
##### Step 2
Deploy the services to Docker Desktop Kubernetes:
```sh
kubectl apply -f monitoring-deployment.yaml
kubectl apply -f app-deployment.yaml
```
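Docker Desktop publishes NodePort and LoadBalancer services on localhost, so (assuming the defaults in `monitoring-deployment.yaml` and `app-deployment.yaml`) a quick check looks like:
```sh
kubectl get pods -n monitoring
curl http://localhost:30000/-/healthy            # Prometheus via NodePort 30000
curl http://localhost:8080/actuator/prometheus   # ms-demo metrics via the LoadBalancer
```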
apiVersion: v1 # Kubernetes API version
kind: Service # Kubernetes resource kind we are creating
metadata: # Metadata of the resource kind we are creating
  name: ms-demo-service
spec:
  selector:
    app: ms-demo
  ports:
  - protocol: "TCP"
    port: 8080 # The port that the service is exposed on in the cluster
    targetPort: 8080 # The port the container listens on
  type: LoadBalancer # Type of the service.
---
apiVersion: apps/v1
kind: Deployment # Kubernetes resource kind we are creating
metadata:
  name: ms-demo
spec:
  selector:
    matchLabels:
      app: ms-demo
  replicas: 2 # Number of replicas that will be created for this deployment
  template:
    metadata:
      labels:
        app: ms-demo
    spec:
      containers:
      - name: ms-demo
        image: ms-demo # Locally built image, used for the containers in the cluster
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080 # The port the container listens on in the cluster
version: '3'
services:
  prometheus:
    image: prom/prometheus
    container_name: prometheus1
    volumes:
    - ./docker/prometheus-desktop.yml:/etc/prometheus/prometheus.yml
    ports:
    - "9090:9090"
    networks:
    - net
  grafana:
    image: grafana/grafana
    container_name: grafana1
    env_file:
    - ./docker/grafana.env
    ports:
    - "3000:3000"
    networks:
    - net
networks:
  net:
# [server]
# GF_SERVER_DOMAIN=localhost
# GF_SERVER_HTTP_PORT=3000
# GF_SERVER_PROTOCOL=http
global:
  scrape_interval: 15s # By default, scrape targets every 15 seconds.
  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'autoscaling-monitor'

# Scrape configurations: Prometheus itself plus the demo application.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s
    static_configs:
      - targets: ['localhost:9090']
  - job_name: 'Autoscaling-Microservice'
    # Override the global default and scrape targets from this job every 5 seconds.
    scrape_interval: 5s
    metrics_path: /actuator/prometheus
    static_configs:
      # Use host.docker.internal (the host, as seen from inside the container) instead of localhost
      - targets: ['host.docker.internal:8080']
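This config expects the application itself on the host at port 8080, which is what `host.docker.internal` resolves to from inside the containers; a minimal sketch, assuming the `ms-demo` image built earlier:
```sh
docker run --rm -p 8080:8080 ms-demo
```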
# Ref: https://medium.com/@gurpreets0610/deploy-prometheus-grafana-on-kubernetes-cluster-e8395cc16f91
# First, we will create a Kubernetes namespace for all our monitoring components
apiVersion: v1
kind: Namespace
metadata:
  name: monitoring
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-config
  namespace: monitoring
data:
  prometheus.yml: |
    # my global config
    global:
      scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
      evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
    # A scrape configuration for Prometheus itself plus the demo application.
    scrape_configs:
      # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
      - job_name: 'prometheus'
        # metrics_path defaults to '/metrics'
        # scheme defaults to 'http'.
        static_configs:
          - targets: ['localhost:9090']
      # MS-Demo App
      - job_name: 'ms-demo-service'
        # Override the global default and scrape targets from this job every 5 seconds.
        scrape_interval: 5s
        metrics_path: /actuator/prometheus
        static_configs:
          # Use the Kubernetes service name (see app-deployment.yaml) instead of localhost
          - targets: ['ms-demo-service.default.svc:8080']
---
apiVersion: v1
kind: PersistentVolume
metadata:
  # PersistentVolumes are cluster-scoped, so no namespace is set here.
  name: pv-manual-data
  labels:
    type: manual
    app: prometheus-deployment
spec:
  # https://stackoverflow.com/questions/54676429/how-to-access-persistentvolume-files-on-docker-for-desktop
  storageClassName: manual
  # https://stackoverflow.com/questions/71018631/kubernetes-on-docker-for-windows-persistent-volume-with-hostpath-gives-operatio
  hostPath:
    path: '/run/desktop/mnt/host/d/promdata'
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteMany
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-manual-data
  namespace: monitoring
  labels:
    app: prometheus-deployment
spec:
  storageClassName: manual
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 500Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus
  namespace: monitoring
  labels:
    app: prometheus
spec:
  replicas: 1
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  selector:
    matchLabels:
      app: prometheus
  template:
    metadata:
      labels:
        app: prometheus
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "9090"
    spec:
      containers:
      - name: prometheus
        image: prom/prometheus
        imagePullPolicy: IfNotPresent
        args:
        - '--storage.tsdb.retention=6h'
        - '--storage.tsdb.path=/prometheus'
        - '--config.file=/etc/prometheus/prometheus.yml'
        ports:
        - name: web
          containerPort: 9090
        volumeMounts:
        - name: prometheus-config-volume
          mountPath: /etc/prometheus
        - name: prometheus-storage-volume
          mountPath: /prometheus
      restartPolicy: Always
      volumes:
      - name: prometheus-config-volume
        configMap:
          defaultMode: 420
          name: prometheus-config
      - name: prometheus-storage-volume
        persistentVolumeClaim:
          claimName: pvc-manual-data
---
apiVersion: v1
kind: Service
metadata:
  name: prometheus-service
  namespace: monitoring
  annotations:
    prometheus.io/scrape: 'true'
    prometheus.io/port: '9090'
spec:
  selector:
    app: prometheus
  type: NodePort
  ports:
  - port: 8080
    targetPort: 9090
    nodePort: 30000
---
FROM eclipse-temurin:17-jre-alpine
WORKDIR /opt
ENV PORT 8080
EXPOSE 8080
COPY target/*.jar /opt/app.jar
ENTRYPOINT exec java $JAVA_OPTS -jar app.jar
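A minimal sketch of building and running this image locally, assuming the jar is first produced by the Maven build (the `COPY target/*.jar` line expects it) and noting that `JAVA_OPTS` is optional:
```sh
mvn package                            # produces target/*.jar
docker build -t ms-demo .
docker run --rm -p 8080:8080 ms-demo   # the app listens on 8080 (see EXPOSE)
```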
package com.master.demo.msdemo;

import com.master.demo.msdemo.resources.FibonacciResource;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.web.client.TestRestTemplate;

import static org.assertj.core.api.Assertions.assertThat;

@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT)
class MsDemoApplicationTests {

	@Test
	void contextLoads() {
	}

	@Value(value = "${local.server.port}")
	private int port;

	@Autowired
	private TestRestTemplate restTemplate;

	// Hits the Fibonacci endpoint repeatedly; this doubles as a simple load generator.
	@Test
	public void greetingShouldReturnDefaultMessage() throws Exception {
		for (int i = 0; i < 30; ++i) {
			String s = this.restTemplate.getForObject("http://localhost:" + port + "/fib/" + i, String.class);
			System.out.println(s);
		}
	}

	// @Test
	// void testFibo() {
	//     FibonacciResource fibonacciResource = new FibonacciResource();
	//     System.out.println(fibonacciResource.fibonacci(15));
	// }
}