support .spec.pvcCapacity in PieProbe CR
Some filesystems (such as XFS and Btrfs) only support volumes above a
certain minimum capacity. Currently, pie uses 100 MiB volumes in every
case, so it does not work on such filesystems.

This commit solves the problem by adding a .spec.pvcCapacity field to
the PieProbe custom resource. When the user sets a value for
.spec.pvcCapacity, pie uses that value as the PVC's capacity. The field
defaults to 100 MiB, so pie behaves the same as before if the user does
not specify it.

Signed-off-by: Ryotaro Banno <[email protected]>
ushitora-anqou committed Aug 6, 2024
1 parent 8c617dc commit 03518ec
Showing 7 changed files with 151 additions and 67 deletions.
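
For illustration, a PieProbe manifest using the new field might look roughly like the sketch below. The object name, storage class, and node selector are hypothetical, and all values other than pvcCapacity are only illustrative; only the pvcCapacity field (with its 100Mi default when omitted) comes from this commit.

    apiVersion: pie.topolvm.io/v1alpha1
    kind: PieProbe
    metadata:
      name: pie-probe-sample         # hypothetical name
      namespace: default
    spec:
      monitoringStorageClass: my-sc  # hypothetical StorageClass
      nodeSelector:                  # assumed shape; set to match your nodes
        nodeSelectorTerms: []
      probePeriod: 1
      probeThreshold: 1m
      pvcCapacity: 300Mi             # new in this commit; defaults to 100Mi when omitted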
6 changes: 6 additions & 0 deletions api/pie/v1alpha1/pieprobe_types.go
@@ -2,6 +2,7 @@ package v1alpha1

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -26,6 +27,11 @@ type PieProbeSpec struct {

	//+kubebuilder:default:="1m"
	ProbeThreshold metav1.Duration `json:"probeThreshold"`

	//+kubebuilder:default:="100Mi"
	//+kubebuilder:validation:Optional
	//+kubebuilder:validation:XValidation:rule="self == oldSelf",message="pvcCapacity is immutable"
	PVCCapacity *resource.Quantity `json:"pvcCapacity"`
}

// PieProbeStatus defines the observed state of PieProbe
5 changes: 5 additions & 0 deletions api/pie/v1alpha1/zz_generated.deepcopy.go

(Generated file; diff not rendered.)

10 changes: 10 additions & 0 deletions charts/pie/templates/pie.topolvm.io_pieprobes.yaml
@@ -143,6 +143,16 @@ spec:
  probeThreshold:
    default: 1m
    type: string
  pvcCapacity:
    anyOf:
    - type: integer
    - type: string
    default: 100Mi
    pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
    x-kubernetes-int-or-string: true
    x-kubernetes-validations:
    - message: pvcCapacity is immutable
      rule: self == oldSelf
required:
- monitoringStorageClass
- nodeSelector
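
Because pvcCapacity is declared as a Kubernetes quantity (x-kubernetes-int-or-string, constrained by the pattern above), either of the following forms would satisfy the schema. Both values are illustrative and equivalent to 200 MiB; they are not taken from this commit.

    # string (binary-suffix) form
    pvcCapacity: 200Mi
    # plain integer form, interpreted as bytes
    pvcCapacity: 209715200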
10 changes: 10 additions & 0 deletions config/crd/bases/pie.topolvm.io_pieprobes.yaml
@@ -143,6 +143,16 @@ spec:
  probeThreshold:
    default: 1m
    type: string
  pvcCapacity:
    anyOf:
    - type: integer
    - type: string
    default: 100Mi
    pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
    x-kubernetes-int-or-string: true
    x-kubernetes-validations:
    - message: pvcCapacity is immutable
      rule: self == oldSelf
required:
- monitoringStorageClass
- nodeSelector
60 changes: 0 additions & 60 deletions internal/controller/common_test.go

This file was deleted.

12 changes: 5 additions & 7 deletions internal/controller/pie/pieprobe_controller.go
@@ -301,12 +301,11 @@ func (r *PieProbeReconciler) createOrUpdatePVC(

	pvc.Spec.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}
	pvc.Spec.StorageClassName = &storageClass
	pvc.Spec.Resources = corev1.VolumeResourceRequirements{
		Requests: map[corev1.ResourceName]resource.Quantity{
			corev1.ResourceStorage: *resource.NewQuantity(
				100*1024*1024, resource.BinarySI),
		},

	if pvc.Spec.Resources.Requests == nil {
		pvc.Spec.Resources.Requests = map[corev1.ResourceName]resource.Quantity{}
	}
	pvc.Spec.Resources.Requests[corev1.ResourceStorage] = *pieProbe.Spec.PVCCapacity

	ctrl.SetControllerReference(pieProbe, pvc, r.client.Scheme())

@@ -430,8 +429,7 @@ func (r *PieProbeReconciler) createOrUpdateJob(
StorageClassName: &storageClass,
Resources: corev1.VolumeResourceRequirements{
	Requests: map[corev1.ResourceName]resource.Quantity{
		corev1.ResourceStorage: *resource.NewQuantity(
			100*1024*1024, resource.BinarySI),
		corev1.ResourceStorage: *pieProbe.Spec.PVCCapacity,
	},
},
},
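
With these changes, a probe PVC created for a PieProbe whose pvcCapacity is 200Mi would carry a resources stanza roughly like the sketch below. The claim name is hypothetical; the accessModes, storageClassName, and storage request mirror what createOrUpdatePVC sets.

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: probe-pvc-example   # hypothetical name
    spec:
      accessModes:
      - ReadWriteOnce
      storageClassName: sc2     # the PieProbe's monitoringStorageClass
      resources:
        requests:
          storage: 200Mi        # copied from .spec.pvcCapacity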
115 changes: 115 additions & 0 deletions internal/controller/pie/pieprobe_controller_test.go
@@ -3,6 +3,7 @@ package pie
import (
	"context"
	"os"
	"strings"
	"time"

	. "github.com/onsi/ginkgo/v2"
@@ -12,6 +13,7 @@ import (
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
ctrl "sigs.k8s.io/controller-runtime"
@@ -34,6 +36,17 @@ func prepareObjects(ctx context.Context) error {
		return err
	}

	storageClass2 := &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: "sc2",
		},
		Provisioner: "sc-provisioner",
	}
	_, err = ctrl.CreateOrUpdate(ctx, k8sClient, storageClass2, func() error { return nil })
	if err != nil {
		return err
	}

	node := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "192.168.0.1",
@@ -148,6 +161,108 @@ var _ = Describe("PieProbe controller", func() {
		time.Sleep(100 * time.Millisecond)
	})

It("should create PVCs with the capacity specified in the PieProbe resource", func() {
By("checking the default PVC's capacity is 100Mi")
Eventually(func(g Gomega) {
var pvcList corev1.PersistentVolumeClaimList
err := k8sClient.List(ctx, &pvcList, client.MatchingLabels(map[string]string{
"storage-class": "sc",
"node": "192.168.0.1",
}))
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(pvcList.Items)).Should(Equal(1))

capacity, ok := pvcList.Items[0].Spec.Resources.Requests.Storage().AsInt64()
g.Expect(ok).To(BeTrue())
g.Expect(capacity).Should(Equal(int64(100 * 1024 * 1024)))

var cronJobList batchv1.CronJobList
err = k8sClient.List(ctx, &cronJobList)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(cronJobList.Items)).Should(Equal(3))
for _, cronJob := range cronJobList.Items {
if !strings.HasPrefix(cronJob.GetName(), "provision-") {
continue
}
g.Expect(
cronJob.Spec.JobTemplate.Spec.Template.Spec.Volumes[0].VolumeSource.Ephemeral.VolumeClaimTemplate.
Spec.Resources.Requests[corev1.ResourceStorage].Equal(*resource.NewQuantity(100*1024*1024, resource.BinarySI)),
).To(BeTrue())
}
}).Should(Succeed())

By("creating a new PieProbe with .spec.PVCCapacity 200Mi")
pieProbe2 := &piev1alpha1.PieProbe{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "pie-probe-sc2",
},
Spec: piev1alpha1.PieProbeSpec{
MonitoringStorageClass: "sc2",
NodeSelector: nodeSelector,
ProbePeriod: 1,
PVCCapacity: resource.NewQuantity(200*1024*1024, resource.BinarySI),
},
}
_, err := ctrl.CreateOrUpdate(ctx, k8sClient, pieProbe2, func() error { return nil })
Expect(err).NotTo(HaveOccurred())

By("checking the PVC's capacity is now 200Mi")
Eventually(func(g Gomega) {
var pvcList corev1.PersistentVolumeClaimList
err := k8sClient.List(ctx, &pvcList, client.MatchingLabels(map[string]string{
"storage-class": "sc2",
"node": "192.168.0.1",
}))
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(pvcList.Items)).Should(Equal(1))

capacity, ok := pvcList.Items[0].Spec.Resources.Requests.Storage().AsInt64()
g.Expect(ok).To(BeTrue())
g.Expect(capacity).Should(Equal(int64(200 * 1024 * 1024)))

var cronJobList batchv1.CronJobList
err = k8sClient.List(ctx, &cronJobList)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(cronJobList.Items)).Should(Equal(6))
for _, cronJob := range cronJobList.Items {
if !strings.HasPrefix(cronJob.GetName(), "provision-pie-probe--192.168.0.1-sc2-") {
continue
}
g.Expect(
cronJob.Spec.JobTemplate.Spec.Template.Spec.Volumes[0].VolumeSource.Ephemeral.VolumeClaimTemplate.
Spec.Resources.Requests[corev1.ResourceStorage].Equal(*resource.NewQuantity(200*1024*1024, resource.BinarySI)),
).To(BeTrue())
}
}).Should(Succeed())

By("cleaning up PVCs and CronJobs for sc2")
err = k8sClient.Delete(ctx, pieProbe2)
Expect(err).NotTo(HaveOccurred())
var pvcList corev1.PersistentVolumeClaimList
err = k8sClient.List(ctx, &pvcList, client.MatchingLabels(map[string]string{
"storage-class": "sc2",
}))
Expect(err).NotTo(HaveOccurred())
for _, pvc := range pvcList.Items {
pvc.ObjectMeta.Finalizers = []string{}
err = k8sClient.Update(ctx, &pvc)
Expect(err).NotTo(HaveOccurred())
err = k8sClient.Delete(ctx, &pvc)
Expect(err).NotTo(HaveOccurred())
}
var cronjobList batchv1.CronJobList
err = k8sClient.List(ctx, &cronjobList)
Expect(err).NotTo(HaveOccurred())
for _, cronjob := range cronjobList.Items {
if !strings.Contains(cronjob.GetName(), "-sc2-") {
continue
}
err = k8sClient.Delete(ctx, &cronjob)
Expect(err).NotTo(HaveOccurred())
}
})

It("should reject to edit monitoringStorageClass", func() {
By("trying to edit monitoringStorageClass")
var pieProbe piev1alpha1.PieProbe
