nomad csi volume plugin
parent d4e350ffcd
commit d3a1f6f4f8

22  roles/nomad/tasks/csi_plugins.yml  Normal file
@@ -0,0 +1,22 @@
- name: Deploy CSI plugin
  community.general.nomad_job:
    host: "nomad.service.consul"
    state: present
    content: "{{ lookup('ansible.builtin.template', item + '.nomad.j2') }}"
    use_ssl: false
  register: nomad_job_spawn
  retries: 10
  delay: 5
  until: nomad_job_spawn.failed == false

- name: Check the job state is healthy
  ansible.builtin.uri:
    url: "http://nomad.service.consul:4646/v1/job/plugin-{{ item }}"
    method: GET
    remote_src: yes
  register: job_status
  until: job_status | json_query('json.Status') == 'running'

- name: debug
  ansible.builtin.debug:
    msg: "{{ nomad_job_spawn }}"
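The health check above only looks at the job's Status field. If the goal is to know that the plugin itself is usable, Nomad also exposes plugin info at /v1/plugin/csi/<id>; below is a minimal sketch of such a wait task, assuming that endpoint and its ControllersHealthy / NodesHealthy fields (this task is not part of the commit):

# Sketch only: endpoint and field names are assumptions based on Nomad's
# CSI plugin HTTP API, not something this role currently does.
- name: Wait for the CSI plugin to report healthy controllers and nodes
  ansible.builtin.uri:
    url: "http://nomad.service.consul:4646/v1/plugin/csi/nfsofficial"
    method: GET
    status_code: [200, 404]   # tolerate "plugin not registered yet" while retrying
  register: plugin_info
  until: >-
    plugin_info.status == 200 and
    plugin_info.json.ControllersHealthy | default(0) | int > 0 and
    plugin_info.json.NodesHealthy | default(0) | int > 0
  retries: 10
  delay: 5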

39  roles/nomad/tasks/csi_volumes.yml  Normal file
@@ -0,0 +1,39 @@
---

- name: Check if volume has already been created and registered
  ansible.builtin.uri:
    url: "http://nomad.service.consul:4646/v1/volume/csi/{{ item.id }}"
    method: GET
    remote_src: yes
  register: volume_exists
  ignore_errors: true

- name: Check for state of CSI Plugins to prevent race
  block:
    - name: Check controller plugin
      ansible.builtin.uri:
        url: "http://nomad.service.consul:4646/v1/job/plugin-nfs-controller"
        method: GET
        remote_src: yes
      register: controller_status
      until: controller_status | json_query('json.Status') == 'running'

    - name: Check node plugin
      ansible.builtin.uri:
        url: "http://nomad.service.consul:4646/v1/job/plugin-nfs-node"
        method: GET
        remote_src: yes
      register: node_status
      until: node_status | json_query('json.Status') == 'running'

- name: Create CSI volume with Nomad API
  ansible.builtin.uri:
    url: "http://nomad.service.consul:4646/v1/volume/csi/{{ item.id }}/create"
    method: PUT
    body_format: json
    body: "{{ lookup('template', 'csi-volumes.json.j2') }}"
    remote_src: yes
  register: csi_volume
  until: csi_volume.status == 200
  retries: 5
  delay: 10
  when: volume_exists.status == 404
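These tasks and the csi-volumes.json.j2 template expect each loop item to carry an id and a name. A hypothetical shape for the nomad_csi_volumes variable that feeds the include_tasks loop later in this commit (the values are illustrative, not taken from the repository):

# Hypothetical example of the variable the loops above assume.
nomad_csi_volumes:
  - id: proxmox
    name: proxmox
  - id: test
    name: test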
@@ -31,16 +31,19 @@
   loop: "{{ nomad_namespaces }}"
   run_once: true
 
-#- name: Setup csi plugins
-#  ansible.builtin.include_tasks: "csi_plugins.yml"
-#  loop:
-#    - azure_csi_controller
-#    - azure_csi_node
-#  when: nomad_csi_volumes is defined
-#  run_once: true
-#
-#- name: Create csi volumes
-#  ansible.builtin.include_tasks: "csi_volumes.yml"
-#  loop: "{{ nomad_csi_volumes }}"
-#  when: nomad_csi_volumes is defined
-#  run_once: true
+- name: Setup csi plugins
+  ansible.builtin.include_tasks: "csi_plugins.yml"
+  loop:
+    - nfs-controller
+    - nfs-plugin-node
+  when: nomad_csi_volumes is defined
+  run_once: true
+
+# We're playing with the NFS CSI plugin and can't create volumes automatically,
+# because every Nomad volume maps to an NFS share, and those need to be created
+# on another server (FIXME).
+# - name: Create csi volumes
+#   ansible.builtin.include_tasks: "csi_volumes.yml"
+#   loop: "{{ nomad_csi_volumes }}"
+#   when: nomad_csi_volumes is defined
+#   run_once: true
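The FIXME comment in this hunk notes that every Nomad volume maps to an NFS share that has to exist on the NFS server before the volume can be used. A minimal sketch of how that pre-creation could be automated, assuming an nfs_server inventory host and that /srv/nfs-exports is already exported; host name, path, and mode are assumptions, not part of this commit:

# Sketch only: nfs_server and the export root are illustrative assumptions.
- name: Ensure a directory exists on the NFS server for each volume
  ansible.builtin.file:
    path: "/srv/nfs-exports/{{ item.name }}"
    state: directory
    mode: "0777"
  loop: "{{ nomad_csi_volumes | default([]) }}"
  delegate_to: nfs_server
  run_once: true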

24  roles/nomad/tasks/testjob-with-volumes.nomad.hcl  Normal file
@@ -0,0 +1,24 @@
job "test" {
  datacenters = ["dc1"]
  type        = "service"

  group "server" {
    count = 2

    volume "test" {
      type            = "csi"
      source          = "test"
      access_mode     = "multi-node-multi-writer"
      attachment_mode = "file-system"
    }

    task "server" {
      driver = "docker"

      config {
        image = "alpine"
        args  = ["sleep", "3600"]
      }

      volume_mount {
        volume      = "test"
        destination = "/mnt/test"
      }
    }
  }
}

18  roles/nomad/templates/csi-volumes.json.j2  Normal file
@@ -0,0 +1,18 @@
type        = "csi"
id          = "{{ item.id }}"
name        = "{{ item.name }}"
plugin_id   = "nfsofficial"
external_id = "{{ item.id }}"

capability {
  access_mode     = "multi-node-multi-writer"
  attachment_mode = "file-system"
}

context {
  server           = "192.168.1.2"
  share            = "/srv/nfs-exports/proxmox"
  mountPermissions = "0"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = ["timeo=30", "intr", "vers=3", "_netdev", "nolock"]
}

27  roles/nomad/templates/nfs-controller.nomad.j2  Normal file
@@ -0,0 +1,27 @@
job "plugin-nfs-controller" {
  datacenters = ["{{ nomad_datacenter }}"]

  group "controller" {
    task "plugin" {
      driver = "docker"

      config {
        image = "registry.k8s.io/sig-storage/nfsplugin:v4.1.0"

        args = [
          "--v=5",
          "--nodeid=${attr.unique.hostname}",
          "--endpoint=unix:///csi/csi.sock",
          "--drivername=nfs.csi.k8s.io"
        ]
      }

      csi_plugin {
        id        = "nfsofficial"
        type      = "controller"
        mount_dir = "/csi"
      }

      resources {
        memory = 32
        cpu    = 100
      }
    }
  }
}

31  roles/nomad/templates/nfs-plugin-node.nomad.j2  Normal file
@@ -0,0 +1,31 @@
job "plugin-nfs-node" {
  datacenters = ["{{ nomad_datacenter }}"]

  # You can run node plugins as service jobs as well, but this ensures
  # that all nodes in the DC have a copy.
  type = "system"

  group "nodes" {
    task "plugin" {
      driver = "docker"

      config {
        image = "registry.k8s.io/sig-storage/nfsplugin:v4.1.0"

        args = [
          "--v=5",
          "--nodeid=${attr.unique.hostname}",
          "--endpoint=unix:///csi/csi.sock",
          "--drivername=nfs.csi.k8s.io"
        ]

        # Node plugins must run as privileged jobs because they
        # mount disks to the host.
        privileged = true
      }

      csi_plugin {
        id        = "nfsofficial"
        type      = "node"
        mount_dir = "/csi"
      }

      resources {
        memory = 10
      }
    }
  }
}

18  roles/nomad/templates/nfs-volume.hcl.j2  Normal file
@@ -0,0 +1,18 @@
type        = "csi"
id          = "test"
name        = "test"
plugin_id   = "nfsofficial"
external_id = "testid"

capability {
  access_mode     = "multi-node-multi-writer"
  attachment_mode = "file-system"
}

context {
  server           = "192.168.1.2"
  share            = "/srv/nfs-exports/nomad"
  mountPermissions = "0"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = ["timeo=30", "intr", "vers=3", "_netdev", "nolock"]
}
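Since the NFS shares are created out of band, a volume spec like the one above is typically registered rather than created. A sketch of doing that by hand with the Nomad CLI wrapped in Ansible command tasks, assuming the rendered spec and the test job have been copied to /tmp on a host that can reach the cluster (the paths are assumptions):

# Sketch only: paths are illustrative. "nomad volume register" attaches an
# existing NFS share to Nomad as a CSI volume; the test job can then mount it.
- name: Register the CSI volume from the rendered spec
  ansible.builtin.command: nomad volume register /tmp/nfs-volume.hcl
  environment:
    NOMAD_ADDR: "http://nomad.service.consul:4646"

- name: Run the test job that mounts the volume
  ansible.builtin.command: nomad job run /tmp/testjob-with-volumes.nomad.hcl
  environment:
    NOMAD_ADDR: "http://nomad.service.consul:4646"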