BYOK Scaleway cluster does not create SCW volumes

We used the Helm values.yaml file generated by the Qovery CLI to deploy a BYOK Scaleway cluster. It includes the following:

  scaleway:
    q-storageclass-scaleway:
      enabled: true

but when we create a new container DB on this cluster it doesn’t create a new volume on Scaleway.

I see it adds a new storage class, scw-sbv-ssd-0 — isn’t it supposed to map the PVC to a Scaleway Volume?

Attaching full values.yaml file as well.
qovery:
  clusterId: &clusterId "${clusterId}"
  clusterShortId: &shortClusterId "${clusterShortId}"
  organizationId: &organizationId "${organizationId}"
  jwtToken: &jwtToken "${jwtToken}"
  domain: &domain "${domain}"
  domainWildcard: &domainWildcard "${domainWildcard}"
  qoveryDnsUrl: &qoveryDnsUrl "https://ddns.qovery.com"
  lokiUrl: &lokiUrl "http://loki.qovery.svc:3100"
  promtailLokiUrl: &promtailLokiUrl "http://loki.qovery.svc:3100/loki/api/v1/push"
  externalDnsPrefix: &externalDnsPrefix ${externalDnsPrefix}
  architectures: &architectures "AMD64"
  engineVersion: &engineVersion "5ea1568"
  clusterAgentVersion: &clusterAgentVersion "97c48aa7e85300f400dd1f15e387da9a5e592106"
  shellAgentVersion: &shellAgentVersion "f7b7014228e2fa115172be86b9768f2293d06203"
services:
  qovery:
    qovery-cluster-agent:
      enabled: true
    qovery-shell-agent:
      enabled: true
    qovery-engine:
      enabled: true
    qovery-priority-class:
      enabled: true
  ingress:
    ingress-nginx:
      enabled: true
  dns:
    external-dns:
      enabled: true
  logging:
    loki:
      enabled: true
    promtail:
      enabled: true
  certificates:
    cert-manager:
      enabled: false
    cert-manager-configs:
      enabled: false
    qovery-cert-manager-webhook:
      enabled: false
  observability: {}
  aws:
    q-storageclass-aws:
      enabled: false
    aws-ebs-csi-driver:
      enabled: false
  gcp:
    q-storageclass-gcp:
      enabled: false
  scaleway:
    q-storageclass-scaleway:
      enabled: true
qovery-cluster-agent:
  fullNameOverride: qovery-shell-agent
  image:
    tag: *clusterAgentVersion
  environmentVariables:
    CLUSTER_ID: *clusterId
    CLUSTER_JWT_TOKEN: *jwtToken
    LOKI_URL: *lokiUrl
    ORGANIZATION_ID: *organizationId
  useSelfSignCertificate: true
qovery-shell-agent:
  fullNameOverride: qovery-shell-agent
  image:
    tag: *shellAgentVersion
  environmentVariables:
    CLUSTER_ID: *clusterId
    CLUSTER_JWT_TOKEN: *jwtToken
    ORGANIZATION_ID: *organizationId
qovery-engine:
  image:
    tag: *engineVersion
  engineResources: null
  buildContainer:
    environmentVariables:
      BUILDER_CPU_ARCHITECTURES: *architectures
      BUILDER_ROOTLESS_ENABLED: 'true'
  environmentVariables:
    CLUSTER_ID: *clusterId
    CLUSTER_JWT_TOKEN: *jwtToken
    DOCKER_HOST: tcp://0.0.0.0:2375
    GRPC_SERVER: engine.qovery.com:443
    LIB_ROOT_DIR: /home/qovery/lib
    ORGANIZATION_ID: *organizationId
  autoscaler:
    enabled: false
    minReplicas: 3
    maxReplicas: 3
    averageValue: 0.5
ingress-nginx:
  controller:
    admissionWebhooks:
      enabled: false
    useComponentLabel: true
    allowSnippetAnnotations: true
    # enable if you want metrics scrapped by prometheus
    metrics:
      serviceMonitor:
    config:
      # set global default file size limit to 100m
      proxy-body-size: 100m
      # hide Nginx version
      server-tokens: "false"
      # required for X-Forwarded-for to work
      use-proxy-protocol: "true"
      # enable real IP (client IP)
      # default format can be found in the template: https://github.com/nginxinc/kubernetes-ingress/blob/v3.5.2/internal/configs/version1/nginx.tmpl#L44
      log-format-upstream: >
        $remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" "$http_x_forwarded_for"
    # the Ingress Class name to be used by Ingresses (use "nginx-qovery" for Qovery application/container deployments)
    ingressClass: nginx-qovery
    extraArgs:
      # Kubernetes path of the default Cert-manager TLS certificate (if used)
      default-ssl-certificate: "qovery/letsencrypt-acme-qovery-cert"
    updateStrategy:
      rollingUpdate:
        # set the minimum acceptable number of unavailable pods during a rolling update
        maxUnavailable: 1
    # enable auoscaling if you want to scale the number of replicas based on CPU usage
    autoscaling:
    # required if you rely on a load balancer
    # the controller mirrors the address of this service's endpoints to the load-balancer status of all Ingress objects it satisfies.
    publishService:
      enabled: true
    # set a load balancer if you want your Nginx to be publicly accessible
    service:
      enabled: true
      # https://github.com/scaleway/scaleway-cloud-controller-manager/blob/master/docs/loadbalancer-annotations.md
      annotations:
        service.beta.kubernetes.io/scw-loadbalancer-forward-port-algorithm: "leastconn"
        service.beta.kubernetes.io/scw-loadbalancer-protocol-http: "false"
        service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v1: "false"
        service.beta.kubernetes.io/scw-loadbalancer-proxy-protocol-v2: "true"
        service.beta.kubernetes.io/scw-loadbalancer-health-check-type: tcp
        service.beta.kubernetes.io/scw-loadbalancer-use-hostname: "true"
        # set Scaleway load balancer type https://www.scaleway.com/en/load-balancer/ (ex: LB-GP-S, LB-GP-M, LB-GP-L, LB-GP-XL)
        # Qovery managed DNS requieres *.$domain (something like: *.<cluster_id>.<given_dns_name>)
        external-dns.alpha.kubernetes.io/hostname: *domainWildcard
      externalTrafficPolicy: "Local"

external-dns:
  # set the provider to use
  provider: cloudflare
  # keep the config you want to use and remove the others. Configure the provider you want to use.
  cloudflare:
    apiToken: "${cloudflareApiToken}"
    email: "${cloudflareEmail}"
    proxied: true
  pdns:
    # Qovery DNS: apiUrl: *qoveryDnsUrl
    apiUrl: *qoveryDnsUrl
    # Qovery DNS: apiPort: "443"
    apiPort: *jwtToken
    # Qovery DNS: apiKey: "443"
    apiKey: 443
  # Make external DNS ignore this ingress https://github.com/kubernetes-sigs/external-dns/issues/1910#issuecomment-976371247
  annotationFilter: external-dns.alpha.kubernetes.io/exclude notin (true)
  # set domainFilters to the domain you want to manage: [*domain]
  domainFilters: [*domain]
  triggerLoopOnEvent: true
  policy: sync
  # avoid dns collision with other external-dns instances
  txtOwnerId: *shortClusterId
  txtPrefix: *externalDnsPrefix
promtail:
  fullnameOverride: promtail
  namespace: qovery
  priorityClassName: qovery-high-priority
  config:
    clients:
      - url: *promtailLokiUrl
    snippets:
      extraRelabelConfigs:
        - action: labelmap
          regex: __meta_kubernetes_pod_label_(qovery_com_service_id|qovery_com_service_type|qovery_com_environment_id)
  # GCP only allows /var/log/xxx to be mounted as hostPath
  defaultVolumes:
    - hostPath:
        path: /var/log/pods
      name: pods
    - emptyDir: {}
      name: run
  defaultVolumeMounts:
    - mountPath: /var/log/pods
      name: pods
      readOnly: true
    - mountPath: /run/promtail
      name: run
loki:
  fullnameOverride: loki
  loki:
    auth_enabled: false
    ingester:
      lifecycler:
        ring:
          kvstore:
            store: inmemory
          replication_factor: 1
    schema_config:
      configs:
        - from: 2020-05-15
          store: boltdb-shipper
          object_store: filesystem
          schema: v11
          index:
            prefix: index_
            period: 24h
  monitoring:
    dashboards:
      enabled: false
    rules:
      enabled: false
    serviceMonitor:
      enabled: false
      metricsInstance:
        enabled: false
    selfMonitoring:
      enabled: false
      grafanaAgent:
        installOperator: false
    grafanaAgent:
      enabled: false
    lokiCanary:
      enabled: false
  test:
    enabled: false
  gateway:
    enabled: false
  singleBinary:
    replicas: 1
    persistence:
      enabled: false
    extraVolumes:
      - name: data
        emptyDir: {}
      - name: storage
        emptyDir: {}
    extraVolumeMounts:
      - name: data
        mountPath: /data
      - name: storage
        mountPath: /var/loki

It appears the issue may be on the Scaleway side. I will update this topic once we find the cause.

1 Like

Hello @prki ,

Glad to see you are using our product.

Did you manage to solve your issue?

Regards,
Charles-Edouard

Scaleway support is still looking into the issue.

A quick update: BYOK Qovery configuration for Scaleway is correct, PVCs are mapped to Scaleway volumes but they are not displayed in Scaleway UI which caused us to initially report the issue here. We also encountered an issue where Scaleway failed to unmount PV from K8S node and the whole cluster was stuck in a weird state unable to scale up or down.

This topic was automatically closed 7 days after the last reply. New replies are no longer allowed.