"Deploying Rook Ceph in Kubernetes: Seamless Storage Management for Your Cluster"

"Deploying Rook Ceph in Kubernetes: Seamless Storage Management for Your Cluster"

[root@master ~]# lsblk
NAME                 MAJ:MIN RM   SIZE RO TYPE MOUNTPOINTS
sr0                   11:0    1  1024M  0 rom
nvme0n1              259:0    0   200G  0 disk
├─nvme0n1p1          259:1    0   600M  0 part /boot/efi
├─nvme0n1p2          259:2    0     1G  0 part /boot
└─nvme0n1p3          259:3    0 198.4G  0 part
  ├─rhel_master-root 253:0    0    70G  0 lvm  /
  ├─rhel_master-swap 253:1    0   6.8G  0 lvm
  └─rhel_master-home 253:2    0 121.6G  0 lvm  /home
[root@master ~]# kubectl get nodes
NAME                  STATUS   ROLES           AGE   VERSION
master.example.com    Ready    control-plane   11d   v1.31.2
worker1.example.com   Ready    <none>          11d   v1.31.2
worker2.example.com   Ready    <none>          11d   v1.31.2
worker3.example.com   Ready    <none>          11d   v1.31.2

Step 1: Attach a 200 GB raw disk to each worker node

[root@master ~]# ssh worker1
root@worker1's password:
Last login: Sat Nov  9 19:21:59 2024 from 192.168.29.15
[root@worker1 ~]# lsblk
NAME          MAJ:MIN RM   SIZE RO TYPE MOUNTPOINTS
sr0            11:0    1  10.3G  0 rom
nvme0n1       259:0    0   200G  0 disk
├─nvme0n1p1   259:1    0   600M  0 part /boot/efi
├─nvme0n1p2   259:2    0     1G  0 part /boot
└─nvme0n1p3   259:3    0 198.4G  0 part
  ├─rhel-root 253:0    0    70G  0 lvm  /
  ├─rhel-swap 253:1    0   5.5G  0 lvm
  └─rhel-home 253:2    0 122.9G  0 lvm  /home
nvme0n2       259:4    0   200G  0 disk
[root@worker1 ~]# ssh worker2
root@worker2's password:
Last login: Sat Nov  9 19:22:45 2024 from 192.168.29.16
[root@worker2 ~]# lsblk
NAME          MAJ:MIN RM   SIZE RO TYPE MOUNTPOINTS
sr0            11:0    1   991M  0 rom
nvme0n1       259:0    0   200G  0 disk
├─nvme0n1p1   259:1    0   600M  0 part /boot/efi
├─nvme0n1p2   259:2    0     1G  0 part /boot
└─nvme0n1p3   259:3    0 198.4G  0 part
  ├─rhel-root 253:0    0    70G  0 lvm  /
  ├─rhel-swap 253:1    0   7.3G  0 lvm
  └─rhel-home 253:2    0 121.1G  0 lvm  /home
nvme0n2       259:4    0   200G  0 disk
nbd8           43:256  0     0B  0 disk
nbd9           43:288  0     0B  0 disk
nbd10          43:320  0     0B  0 disk
nbd11          43:352  0     0B  0 disk
nbd12          43:384  0     0B  0 disk
nbd13          43:416  0     0B  0 disk
nbd14          43:448  0     0B  0 disk
nbd15          43:480  0     0B  0 disk
[root@worker2 ~]# ssh worker3
root@worker3's password:
Last login: Sat Nov  9 19:23:22 2024 from 192.168.29.17
[root@worker3 ~]# lsblk
NAME          MAJ:MIN RM   SIZE RO TYPE MOUNTPOINTS
sr0            11:0    1  10.3G  0 rom
nvme0n1       259:0    0   200G  0 disk
├─nvme0n1p1   259:1    0   600M  0 part /boot/efi
├─nvme0n1p2   259:2    0     1G  0 part /boot
└─nvme0n1p3   259:3    0 198.4G  0 part
  ├─rhel-root 253:0    0    70G  0 lvm  /
  ├─rhel-swap 253:1    0   6.4G  0 lvm
  └─rhel-home 253:2    0   122G  0 lvm  /home
nvme0n2       259:4    0   200G  0 disk
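Rook will only create an OSD on a device that is completely raw: no partition table, no filesystem, and no LVM signatures. If the new 200 GB disk was ever used before, it can be wiped first. A minimal sketch, assuming the spare device really is /dev/nvme0n2 as shown above (destructive, run on each worker against the spare disk only; sgdisk comes from the gdisk package):

# WARNING: wipes /dev/nvme0n2 completely
sgdisk --zap-all /dev/nvme0n2         # remove any GPT/MBR partition table
wipefs --all /dev/nvme0n2             # clear filesystem and LVM signatures
dd if=/dev/zero of=/dev/nvme0n2 bs=1M count=100 oflag=direct,dsync   # zero the first 100 MiB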


Step 2: Fetch the example manifests from the Rook repository and deploy the operator

$ git clone --single-branch --branch master https://github.com/rook/rook.git
$ cd rook/deploy/examples
$ kubectl create -f crds.yaml -f common.yaml -f operator.yaml

cluster.yaml is created in Step 3, after it has been edited to point at the worker nodes and their disks.
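Before moving on, it is worth confirming that the operator pod is up (the label below is the one set by the example operator.yaml; a plain kubectl get pods -n rook-ceph works just as well):

kubectl -n rook-ceph get pods -l app=rook-ceph-operator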



For reference, these are the node IP addresses used below (from /etc/hosts on the master):

[root@master examples]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6


192.168.29.15 master.example.com  master
192.168.29.16 worker1.example.com worker1
192.168.29.17 worker2.example.com worker2
192.168.29.18 worker3.example.com worker3
[root@master examples]#


Step 3: Edit cluster.yaml to list the worker nodes and their raw devices

[root@master examples]# cat cluster.yaml
#################################################################################################################
# Define the settings for the rook-ceph cluster with common settings for a production cluster.
# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required
# in this example. See the documentation for more details on storage settings available.

# For example, to create the cluster:
#   kubectl create -f crds.yaml -f common.yaml -f operator.yaml
#   kubectl create -f cluster.yaml
#################################################################################################################

apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph # namespace:cluster
spec:
  cephVersion:
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    # v18 is Reef, v19 is Squid
    # RECOMMENDATION: In production, use a specific version tag instead of the general v19 flag, which pulls the latest release and could result in different
    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
    # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v18.2.4-20240724
    # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
    image: quay.io/ceph/ceph:v18.2.4
    # Whether to allow unsupported versions of Ceph. Currently Reef and Squid are supported.
    # Future versions such as Tentacle (v20) would require this to be set to `true`.
    # Do not set to true in production.
    allowUnsupported: false
  # The path on the host where configuration files will be persisted. Must be specified. If there are multiple clusters, the directory must be unique for each cluster.
  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
  dataDirHostPath: /var/lib/rook
  # Whether or not upgrade should continue even if a check fails
  # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
  # Use at your OWN risk
  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/latest/ceph-upgrade.html#ceph-version-upgrades
  skipUpgradeChecks: false
  # Whether or not continue if PGs are not clean during an upgrade
  continueUpgradeAfterChecksEvenIfNotHealthy: false
  # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
  # If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one
  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would
  # continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
  # The default wait timeout is 10 minutes.
  waitTimeoutForHealthyOSDInMinutes: 10
  # Whether or not requires PGs are clean before an OSD upgrade. If set to `true` OSD upgrade process won't start until PGs are healthy.
  # This configuration will be ignored if `skipUpgradeChecks` is `true`.
  # Default is false.
  upgradeOSDRequiresHealthyPGs: false
  mon:
    # Set the number of mons to be started. Generally recommended to be 3.
    # For highest availability, an odd number of mons should be specified.
    count: 3
    # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
    # Mons should only be allowed on the same node for test environments where data loss is acceptable.
    allowMultiplePerNode: false
  mgr:
    # When higher availability of the mgr is needed, increase the count to 2.
    # In that case, one mgr will be active and one in standby. When Ceph updates which
    # mgr is active, Rook will update the mgr services to match the active mgr.
    count: 2
    allowMultiplePerNode: false
    modules:
      # List of modules to optionally enable or disable.
      # Note the "dashboard" and "monitoring" modules are already configured by other settings in the cluster CR.
      - name: rook
        enabled: true
  # enable the ceph dashboard for viewing cluster status
  dashboard:
    enabled: true
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
    # serve the dashboard at the given port.
    # port: 8443
    # serve the dashboard using SSL
    ssl: true
    # The url of the Prometheus instance
    # prometheusEndpoint: <protocol>://<prometheus-host>:<port>
    # Whether SSL should be verified if the Prometheus server is using https
    # prometheusEndpointSSLVerify: false
  # enable prometheus alerting for cluster
  monitoring:
    # requires Prometheus to be pre-installed
    enabled: false
    # Whether to disable the metrics reported by Ceph. If false, the prometheus mgr module and Ceph exporter are enabled.
    # If true, the prometheus mgr module and Ceph exporter are both disabled. Default is false.
    metricsDisabled: false
    # Ceph exporter metrics config.
    exporter:
      # Specifies which performance counters are exported.
      # Corresponds to --prio-limit Ceph exporter flag
      # 0 - all counters are exported
      perfCountersPrioLimit: 5
      # Time to wait before sending requests again to exporter server (seconds)
      # Corresponds to --stats-period Ceph exporter flag
      statsPeriodSeconds: 5
  network:
    connections:
      # Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network.
      # The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted.
      # When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
      # IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only,
      # you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
      # The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
      encryption:
        enabled: false
      # Whether to compress the data in transit across the wire. The default is false.
      # See the kernel requirements above for encryption.
      compression:
        enabled: false
      # Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
      # and clients will be required to connect to the Ceph cluster with the v2 port (3300).
      # Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
      requireMsgr2: false
    # enable host networking
    #provider: host
    # enable the Multus network provider
    #provider: multus
    #selectors:
    #  The selector keys are required to be `public` and `cluster`.
    #  Based on the configuration, the operator will do the following:
    #    1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
    #    2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
    #
    #  In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
    #
    #  public: public-conf --> NetworkAttachmentDefinition object name in Multus
    #  cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
    # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
    #ipFamily: "IPv6"
    # Ceph daemons to listen on both IPv4 and Ipv6 networks
    #dualStack: false
    # Enable multiClusterService to export the mon and OSD services to peer cluster.
    # This is useful to support RBD mirroring between two clusters having overlapping CIDRs.
    # Ensure that peer clusters are connected using an MCS API compatible application, like Globalnet Submariner.
    #multiClusterService:
    #  enabled: false

  # enable the crash collector for ceph daemon crash collection
  crashCollector:
    disable: false
    # Uncomment daysToRetain to prune ceph crash entries older than the
    # specified number of days.
    #daysToRetain: 30
  # enable log collector, daemons will log on files and rotate
  logCollector:
    enabled: true
    periodicity: daily # one of: hourly, daily, weekly, monthly
    maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
  # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
  cleanupPolicy:
    # Since cluster cleanup is destructive to data, confirmation is required.
    # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
    # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
    # Rook will immediately stop configuring the cluster and only wait for the delete command.
    # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
    confirmation: ""
    # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
    sanitizeDisks:
      # method indicates if the entire disk should be sanitized or simply ceph's metadata
      # in both case, re-install is possible
      # possible choices are 'complete' or 'quick' (default)
      method: quick
      # dataSource indicate where to get random bytes from to write on the disk
      # possible choices are 'zero' (default) or 'random'
      # using random sources will consume entropy from the system and will take much more time then the zero source
      dataSource: zero
      # iteration overwrite N times instead of the default (1)
      # takes an integer value
      iteration: 1
    # allowUninstallWithVolumes defines how the uninstall should be performed
    # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
    allowUninstallWithVolumes: false
  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'.
  # placement:
  #   all:
  #     nodeAffinity:
  #       requiredDuringSchedulingIgnoredDuringExecution:
  #         nodeSelectorTerms:
  #         - matchExpressions:
  #           - key: role
  #             operator: In
  #             values:
  #             - storage-node
  #     podAffinity:
  #     podAntiAffinity:
  #     topologySpreadConstraints:
  #     tolerations:
  #     - key: storage-node
  #       operator: Exists
  # The above placement information can also be specified for mon, osd, and mgr components
  #   mon:
  # Monitor deployments may contain an anti-affinity rule for avoiding monitor
  # collocation on the same node. This is a required rule when host network is used
  # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
  # preferred rule with weight: 50.
  #   osd:
  #    prepareosd:
  #    mgr:
  #    cleanup:
  annotations:
  #   all:
  #   mon:
  #   mgr:
  #   osd:
  #   exporter:
  #   crashcollector:
  #   cleanup:
  #   prepareosd:
  # cmdreporter is for jobs to detect ceph and csi versions, and check network status
  #   cmdreporter:
  # clusterMetadata annotations will be applied to only `rook-ceph-mon-endpoints` configmap and the `rook-ceph-mon` and `rook-ceph-admin-keyring` secrets.
  # And clusterMetadata annotations will not be merged with `all` annotations.
  #    clusterMetadata:
  #       kubed.appscode.com/sync: "true"
  # If no mgr annotations are set, prometheus scrape annotations will be set by default.
  #   mgr:
  labels:
  #   all:
  #   mon:
  #   osd:
  #   cleanup:
  #   mgr:
  #   prepareosd:
  # These labels are applied to ceph-exporter servicemonitor only
  #   exporter:
  # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
  # These labels can be passed as LabelSelector to Prometheus
  #   monitoring:
  #   crashcollector:
  resources:
  #The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
  #   mgr:
  #     limits:
  #       memory: "1024Mi"
  #     requests:
  #       cpu: "500m"
  #       memory: "1024Mi"
  # The above example requests/limits can also be added to the other components
  #   mon:
  #   osd:
  # For OSD it also is a possible to specify requests/limits based on device class
  #   osd-hdd:
  #   osd-ssd:
  #   osd-nvme:
  #   prepareosd:
  #   mgr-sidecar:
  #   crashcollector:
  #   logcollector:
  #   cleanup:
  #   exporter:
  #   cmd-reporter:
  # The option to automatically remove OSDs that are out and are safe to destroy.
  removeOSDsIfOutAndSafeToRemove: false
  priorityClassNames:
    #all: rook-ceph-default-priority-class
    mon: system-node-critical
    osd: system-node-critical
    mgr: system-cluster-critical
    #crashcollector: rook-ceph-crashcollector-priority-class
  storage: # cluster level storage configuration and selection
    useAllNodes: true
    useAllDevices: true
    #deviceFilter:
    config:
      # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
      # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
      # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
      # osdsPerDevice: "1" # this value can be overridden at the node or device level
      # encryptedDevice: "true" # the default value for this option is "false"
      # deviceClass: "myclass" # specify a device class for OSDs in the cluster
    allowDeviceClassUpdate: false # whether to allow changing the device class of an OSD after it is created
    allowOsdCrushWeightUpdate: false # whether to allow resizing the OSD crush weight after osd pvc is increased
    # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
    # nodes below will be used as storage resources.  Each node's 'name' field should match their 'kubernetes.io/hostname' label.

    nodes:
      - name: "192.168.29.16"
        devices:
          - name: "nvme0n2"
            config:
              osdsPerDevice: "1"
      - name: "192.168.29.17"
        devices:
          - name: "nvme0n2"
            config:
              osdsPerDevice: "1"
      - name: "192.168.29.18"
        devices:
          - name: "nvme0n2"
            config:
              osdsPerDevice: "1"

    # nodes:
    #   - name: "172.17.4.201"
    #     devices: # specific devices to use for storage can be specified for each node
    #       - name: "sdb"
    #       - name: "nvme01" # multiple osds can be created on high performance devices
    #         config:
    #           osdsPerDevice: "5"
    #       - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
    #     config: # configuration can be specified at the node level which overrides the cluster level config
    #   - name: "172.17.4.301"
    #     deviceFilter: "^sd."
    # Whether to always schedule OSD pods on nodes declared explicitly in the "nodes" section, even if they are
    # temporarily not schedulable. If set to true, consider adding placement tolerations for unschedulable nodes.
    scheduleAlways: false
    # when onlyApplyOSDPlacement is false, will merge both placement.All() and placement.osd
    onlyApplyOSDPlacement: false
    # Time for which an OSD pod will sleep before restarting, if it stopped due to flapping
    # flappingRestartIntervalHours: 24
    # The ratio at which Ceph should block IO if the OSDs are too full. The default is 0.95.
    # fullRatio: 0.95
    # The ratio at which Ceph should stop backfilling data if the OSDs are too full. The default is 0.90.
    # backfillFullRatio: 0.90
    # The ratio at which Ceph should raise a health warning if the OSDs are almost full. The default is 0.85.
    # nearFullRatio: 0.85
  # The section for configuring management of daemon disruptions during upgrade or fencing.
  disruptionManagement:
    # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
    # block eviction of OSDs by default and unblock them safely when drains are detected.
    managePodBudgets: true
    # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
    # default DOWN/OUT interval) when it is draining. This is only relevant when  `managePodBudgets` is `true`. The default value is `30` minutes.
    osdMaintenanceTimeout: 30
    # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
    # Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`.
    # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
    pgHealthCheckTimeout: 0

  # csi defines CSI Driver settings applied per cluster.
  csi:
    readAffinity:
      # Enable read affinity to enable clients to optimize reads from an OSD in the same topology.
      # Enabling the read affinity may cause the OSDs to consume some extra memory.
      # For more details see this doc:
      # https://rook.io/docs/rook/latest/Storage-Configuration/Ceph-CSI/ceph-csi-drivers/#enable-read-affinity-for-rbd-volumes
      enabled: false

    # cephfs driver specific settings.
    cephfs:
      # Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
      # kernelMountOptions: ""
      # Set CephFS Fuse mount options to use https://docs.ceph.com/en/latest/man/8/ceph-fuse/#options.
      # fuseMountOptions: ""

  # healthChecks
  # Valid values for daemons are 'mon', 'osd', 'status'
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s
      osd:
        disabled: false
        interval: 60s
      status:
        disabled: false
        interval: 60s
    # Change pod liveness probe timing or threshold values. Works for all mon,mgr,osd daemons.
    livenessProbe:
      mon:
        disabled: false
      mgr:
        disabled: false
      osd:
        disabled: false
    # Change pod startup probe timing or threshold values. Works for all mon,mgr,osd daemons.
    startupProbe:
      mon:
        disabled: false
      mgr:
        disabled: false
      osd:
        disabled: false


########################################################################################################

Add the following block under the storage: section, one entry per worker node with its spare disk. Per the comments in cluster.yaml itself, each name should match the node's kubernetes.io/hostname label, and the explicit nodes list only takes effect when useAllNodes and useAllDevices are set to false; with both left at true, as in this run, Rook simply consumes every raw device it finds on any node.

    nodes:
      - name: "192.168.29.16"
        devices:
          - name: "nvme0n2"
            config:
              osdsPerDevice: "1"
      - name: "192.168.29.17"
        devices:
          - name: "nvme0n2"
            config:
              osdsPerDevice: "1"
      - name: "192.168.29.18"
        devices:
          - name: "nvme0n2"
            config:
              osdsPerDevice: "1"
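With the node entries in place, create the cluster and confirm that the CephCluster resource is progressing (column names can vary slightly between Rook versions):

kubectl create -f cluster.yaml
kubectl -n rook-ceph get cephcluster rook-ceph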



Watch the pods in the rook-ceph namespace until the mons, mgrs, and OSDs are Running and the osd-prepare jobs have Completed:

watch kubectl get pods -n rook-ceph

Every 2.0s: kubectl get pods -n rook-ceph                                       master.example.com: Fri Nov  8 13:33:08 2024

NAME                                                            READY   STATUS      RESTARTS        AGE
csi-cephfsplugin-5l225                                          3/3     Running     1 (11m ago)     11m
csi-cephfsplugin-7lxgv                                          3/3     Running     1 (11m ago)     11m
csi-cephfsplugin-lbpvx                                          3/3     Running     1 (11m ago)     11m
csi-cephfsplugin-provisioner-fc44fc7d-2xkh6                     6/6     Running     1 (11m ago)     11m
csi-cephfsplugin-provisioner-fc44fc7d-4mfqj                     6/6     Running     1 (10m ago)     11m
csi-rbdplugin-kpvwr                                             3/3     Running     1 (11m ago)     11m
csi-rbdplugin-kztzz                                             3/3     Running     1 (11m ago)     11m
csi-rbdplugin-provisioner-76cf8b4f4b-fmrdk                      6/6     Running     3 (8m41s ago)   11m
csi-rbdplugin-provisioner-76cf8b4f4b-lfl56                      6/6     Running     2 (6m46s ago)   11m
csi-rbdplugin-v82ns                                             3/3     Running     1 (11m ago)     11m
rook-ceph-crashcollector-worker1.example.com-6c966ff779-6n5k9   1/1     Running     0               4m42s
rook-ceph-crashcollector-worker2.example.com-6876cdbc5b-ngpdv   1/1     Running     0               4m20s
rook-ceph-crashcollector-worker3.example.com-675f657989-kncnl   1/1     Running     0               4m27s
rook-ceph-exporter-worker1.example.com-c7449b58c-l7hqw          1/1     Running     0               4m42s
rook-ceph-exporter-worker2.example.com-5df7bc8588-skc5d         1/1     Running     0               4m17s
rook-ceph-exporter-worker3.example.com-5c96fff8d9-wdpd7         1/1     Running     0               4m24s
rook-ceph-mgr-a-5b7bf879db-lmbl7                                3/3     Running     0               4m59s
rook-ceph-mgr-b-84d8b7856c-4bhll                                3/3     Running     0               4m58s
rook-ceph-mon-a-b5b5fc7db-8q6lh                                 2/2     Running     0               11m
rook-ceph-mon-b-85874c476-z8fmx                                 2/2     Running     0               10m
rook-ceph-mon-c-678d5f454f-w4dcb                                2/2     Running     0               5m17s
rook-ceph-operator-5f68d9fbc6-7klr8                             1/1     Running     0               26m
rook-ceph-osd-0-5799fdf786-k4x2p                                2/2     Running     0               4m27s
rook-ceph-osd-1-75897b4bf-7wglt                                 2/2     Running     0               4m26s
rook-ceph-osd-2-7b76ddfcfb-m2zhn                                2/2     Running     0               4m20s
rook-ceph-osd-prepare-worker1.example.com-dpd8q                 0/1     Completed   0               4m36s
rook-ceph-osd-prepare-worker2.example.com-nvzn9                 0/1     Completed   0               4m35s
rook-ceph-osd-prepare-worker3.example.com-ljz62                 0/1     Completed   0               4m35s
[root@master examples]# watch kubectl get pods -n rook-ceph 


NAME                                                            READY   STATUS      RESTARTS        AGE
csi-cephfsplugin-5l225                                          3/3     Running     1 (11m ago)     11m
csi-cephfsplugin-7lxgv                                          3/3     Running     1 (11m ago)     11m
csi-cephfsplugin-lbpvx                                          3/3     Running     1 (11m ago)     11m
csi-cephfsplugin-provisioner-fc44fc7d-2xkh6                     6/6     Running     1 (11m ago)     11m
csi-cephfsplugin-provisioner-fc44fc7d-4mfqj                     6/6     Running     1 (11m ago)     11m
csi-rbdplugin-kpvwr                                             3/3     Running     1 (11m ago)     11m
csi-rbdplugin-kztzz                                             3/3     Running     1 (11m ago)     11m
csi-rbdplugin-provisioner-76cf8b4f4b-fmrdk                      6/6     Running     3 (8m46s ago)   11m
csi-rbdplugin-provisioner-76cf8b4f4b-lfl56                      6/6     Running     2 (6m51s ago)   11m
csi-rbdplugin-v82ns                                             3/3     Running     1 (11m ago)     11m
rook-ceph-crashcollector-worker1.example.com-6c966ff779-6n5k9   1/1     Running     0               4m47s
rook-ceph-crashcollector-worker2.example.com-6876cdbc5b-ngpdv   1/1     Running     0               4m25s
rook-ceph-crashcollector-worker3.example.com-675f657989-kncnl   1/1     Running     0               4m32s
rook-ceph-exporter-worker1.example.com-c7449b58c-l7hqw          1/1     Running     0               4m47s
rook-ceph-exporter-worker2.example.com-5df7bc8588-skc5d         1/1     Running     0               4m22s
rook-ceph-exporter-worker3.example.com-5c96fff8d9-wdpd7         1/1     Running     0               4m29s
rook-ceph-mgr-a-5b7bf879db-lmbl7                                3/3     Running     0               5m4s
rook-ceph-mgr-b-84d8b7856c-4bhll                                3/3     Running     0               5m3s
rook-ceph-mon-a-b5b5fc7db-8q6lh                                 2/2     Running     0               11m
rook-ceph-mon-b-85874c476-z8fmx                                 2/2     Running     0               10m
rook-ceph-mon-c-678d5f454f-w4dcb                                2/2     Running     0               5m22s
rook-ceph-operator-5f68d9fbc6-7klr8                             1/1     Running     0               26m
rook-ceph-osd-0-5799fdf786-k4x2p                                2/2     Running     0               4m32s
rook-ceph-osd-1-75897b4bf-7wglt                                 2/2     Running     0               4m31s
rook-ceph-osd-2-7b76ddfcfb-m2zhn                                2/2     Running     0               4m25s
rook-ceph-osd-prepare-worker1.example.com-dpd8q                 0/1     Completed   0               4m41s
rook-ceph-osd-prepare-worker2.example.com-nvzn9                 0/1     Completed   0               4m40s
rook-ceph-osd-prepare-worker3.example.com-ljz62                 0/1     Completed   0               4m40s
[root@master examples]# kubectl create -f toolbox.yaml
deployment.apps/rook-ceph-tools created
[root@master examples]# kubectl get pods -n rook-ceph

NAME                                                            READY   STATUS      RESTARTS        AGE
csi-cephfsplugin-5l225                                          3/3     Running     1 (12m ago)     13m
csi-cephfsplugin-7lxgv                                          3/3     Running     1 (12m ago)     13m
csi-cephfsplugin-lbpvx                                          3/3     Running     1 (12m ago)     13m
csi-cephfsplugin-provisioner-fc44fc7d-2xkh6                     6/6     Running     1 (12m ago)     13m
csi-cephfsplugin-provisioner-fc44fc7d-4mfqj                     6/6     Running     1 (12m ago)     13m
csi-rbdplugin-kpvwr                                             3/3     Running     1 (12m ago)     13m
csi-rbdplugin-kztzz                                             3/3     Running     1 (12m ago)     13m
csi-rbdplugin-provisioner-76cf8b4f4b-fmrdk                      6/6     Running     3 (10m ago)     13m
csi-rbdplugin-provisioner-76cf8b4f4b-lfl56                      6/6     Running     2 (8m11s ago)   13m
csi-rbdplugin-v82ns                                             3/3     Running     1 (12m ago)     13m
rook-ceph-crashcollector-worker1.example.com-6c966ff779-6n5k9   1/1     Running     0               6m7s
rook-ceph-crashcollector-worker2.example.com-6876cdbc5b-ngpdv   1/1     Running     0               5m45s
rook-ceph-crashcollector-worker3.example.com-675f657989-kncnl   1/1     Running     0               5m52s
rook-ceph-exporter-worker1.example.com-c7449b58c-l7hqw          1/1     Running     0               6m7s
rook-ceph-exporter-worker2.example.com-5df7bc8588-skc5d         1/1     Running     0               5m42s
rook-ceph-exporter-worker3.example.com-5c96fff8d9-wdpd7         1/1     Running     0               5m49s
rook-ceph-mgr-a-5b7bf879db-lmbl7                                3/3     Running     0               6m24s
rook-ceph-mgr-b-84d8b7856c-4bhll                                3/3     Running     0               6m23s
rook-ceph-mon-a-b5b5fc7db-8q6lh                                 2/2     Running     0               12m
rook-ceph-mon-b-85874c476-z8fmx                                 2/2     Running     0               12m
rook-ceph-mon-c-678d5f454f-w4dcb                                2/2     Running     0               6m42s
rook-ceph-operator-5f68d9fbc6-7klr8                             1/1     Running     0               27m
rook-ceph-osd-0-5799fdf786-k4x2p                                2/2     Running     0               5m52s
rook-ceph-osd-1-75897b4bf-7wglt                                 2/2     Running     0               5m51s
rook-ceph-osd-2-7b76ddfcfb-m2zhn                                2/2     Running     0               5m45s
rook-ceph-osd-prepare-worker1.example.com-dpd8q                 0/1     Completed   0               6m1s
rook-ceph-osd-prepare-worker2.example.com-nvzn9                 0/1     Completed   0               6m
rook-ceph-osd-prepare-worker3.example.com-ljz62                 0/1     Completed   0               6m
rook-ceph-tools-68bf47bc65-mgvpv                                1/1     Running     0               47s
[root@master examples]# kubectl -n rook-ceph exec -it rook-ceph-tools-68bf47bc65-mgvpv bash

error: exec [POD] [COMMAND] is not supported anymore. Use exec [POD] -- [COMMAND] instead
See 'kubectl exec -h' for help and examples
[root@master examples]# kubectl -n rook-ceph exec -it rook-ceph-tools-68bf47bc65-mgvpv -- /bin/bash

bash-5.1$ ceph -s 

  cluster:
    id:     249cabb5-1159-40b3-b0cf-7a639eb1b990
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum c,b (age 0.303891s), out of quorum: a
    mgr: b(active, since 6m), standbys: a
    osd: 3 osds: 3 up (since 6m), 3 in (since 6m)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   53 MiB used, 400 GiB / 400 GiB avail
    pgs:     
 
bash-5.1$ ceph osd tree 

ID  CLASS  WEIGHT   TYPE NAME                     STATUS  REWEIGHT  PRI-AFF
-1         0.58589  root default                                           
-5         0.19530      host worker1-example-com                           
 1    ssd  0.19530          osd.1                     up   1.00000  1.00000
-7         0.19530      host worker2-example-com                           
 2    ssd  0.19530          osd.2                     up   1.00000  1.00000
-3         0.19530      host worker3-example-com                           
 0    ssd  0.19530          osd.0                     up   1.00000  1.00000
bash-5.1$ ceph osd status 

ID  HOST                  USED  AVAIL  WR OPS  WR DATA  RD OPS  RD DATA  STATE      
 0  worker3.example.com  26.5M   199G      0        0       0        0   exists,up  
 1  worker1.example.com  26.5M   199G      0        0       0        0   exists,up  
 2  worker2.example.com  26.5M   199G      0        0       0        0   exists,up  
bash-5.1$ exit
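At this point the cluster is healthy but nothing consumes it yet. A typical next step, not shown in the session above, is to create an RBD block pool and a StorageClass from the same rook/deploy/examples checkout so that PersistentVolumeClaims can be served by Ceph; the path below is the one shipped with the Rook examples:

kubectl create -f csi/rbd/storageclass.yaml
kubectl get storageclass

Tip: the toolbox shell can also be opened without looking up the pod hash by exec'ing into the deployment: kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- bash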
