From 945be1fa0ad2ce1a02303f4267bf6a221c60030f Mon Sep 17 00:00:00 2001
From: Shahab Dogar
Date: Sun, 23 Nov 2025 00:20:55 +0500
Subject: [PATCH] feat: Cache | implement npm cache

---
 cache-infrastructure/index.ts          |  26 +++-
 cache-infrastructure/nix/index.ts      |  45 +++---
 cache-infrastructure/npm/config.yaml   |  19 +++
 cache-infrastructure/npm/index.ts      | 184 +++++++++++++++++++++++++
 cache-infrastructure/pip/manifest.yaml | 135 +++++++++++++++++
 5 files changed, 389 insertions(+), 20 deletions(-)
 create mode 100644 cache-infrastructure/npm/config.yaml
 create mode 100644 cache-infrastructure/npm/index.ts
 create mode 100644 cache-infrastructure/pip/manifest.yaml

diff --git a/cache-infrastructure/index.ts b/cache-infrastructure/index.ts
index c627649..2c7992c 100644
--- a/cache-infrastructure/index.ts
+++ b/cache-infrastructure/index.ts
@@ -1,17 +1,39 @@
 import { Construct } from "constructs";
 import { TerraformStack } from "cdktf";
 import { KubernetesProvider } from "@cdktf/provider-kubernetes/lib/provider";
+import { NamespaceV1 } from "@cdktf/provider-kubernetes/lib/namespace-v1";
 import { NixCache } from "./nix";
+import { NpmCache } from "./npm";
 
 export class CacheInfrastructure extends TerraformStack {
   constructor(scope: Construct, id: string) {
     super(scope, id);
 
-    const kubernetes = new KubernetesProvider(this, "kubernetes", {
+    const provider = new KubernetesProvider(this, "kubernetes", {
       configPath: "~/.kube/config",
     });
 
+    const namespace = "package-cache";
+
+    new NamespaceV1(this, "package-cache-namespace", {
+      metadata: {
+        name: namespace,
+      },
+    });
+
     // Add cache-related infrastructure components here
-    new NixCache(this, "nix-cache", kubernetes);
+    new NixCache(this, "nix-cache", {
+      provider,
+      namespace,
+      name: "nix-cache",
+      host: "nix.dogar.dev",
+    });
+
+    new NpmCache(this, "npm-cache", {
+      provider,
+      namespace,
+      name: "npm-cache",
+      host: "npm.dogar.dev",
+    });
   }
 }
diff --git a/cache-infrastructure/nix/index.ts b/cache-infrastructure/nix/index.ts
index 407c217..4f8cc7c 100644
--- a/cache-infrastructure/nix/index.ts
+++ b/cache-infrastructure/nix/index.ts
@@ -8,14 +8,23 @@ import { ServiceV1 } from "@cdktf/provider-kubernetes/lib/service-v1";
 
 import { PublicIngressRoute, LonghornPvc } from "../../utils";
 
+type NixCacheOptions = {
+  provider: KubernetesProvider;
+  name: string;
+  namespace: string;
+  host: string;
+};
+
 export class NixCache extends Construct {
-  constructor(scope: Construct, id: string, provider: KubernetesProvider) {
+  constructor(scope: Construct, id: string, options: NixCacheOptions) {
     super(scope, id);
 
+    const { provider, name, namespace, host } = options;
+
     const pvc = new LonghornPvc(this, "pvc", {
       provider,
-      name: "nix-cache",
-      namespace: "homelab",
+      name,
+      namespace,
       accessModes: ["ReadWriteMany"],
       size: "64Gi",
     });
@@ -25,11 +34,11 @@ export class NixCache extends Construct {
       "utf-8",
     );
 
-    const configMap = new ConfigMapV1(this, "config-map", {
+    new ConfigMapV1(this, "config", {
       provider,
       metadata: {
-        name: "nix-cache",
-        namespace: "homelab",
+        name,
+        namespace,
       },
       data: {
         "nix-cache.conf": nginxConfig,
@@ -39,12 +48,12 @@ export class NixCache extends Construct {
     new ServiceV1(this, "service", {
       provider,
       metadata: {
-        name: "nix-cache",
-        namespace: "homelab",
+        name,
+        namespace,
       },
       spec: {
         selector: {
-          app: "nix-cache",
+          app: name,
         },
         port: [
           {
@@ -60,20 +69,20 @@ export class NixCache extends Construct {
     new DeploymentV1(this, "deployment", {
       provider,
       metadata: {
-        name: "nix-cache",
-        namespace: "homelab",
+        name,
+        namespace,
       },
       spec: {
         replicas: "3",
         selector: {
           matchLabels: {
-            app: "nix-cache",
+            app: name,
           },
         },
         template: {
           metadata: {
             labels: {
-              app: "nix-cache",
+              app: name,
             },
           },
           spec: {
@@ -104,7 +113,7 @@ export class NixCache extends Construct {
             {
               name: "nginx-config",
               configMap: {
-                name: configMap.metadata.name,
+                name,
                 items: [
                   {
                     key: "nix-cache.conf",
@@ -121,10 +130,10 @@ export class NixCache extends Construct {
 
     new PublicIngressRoute(this, "ingress-route", {
       provider,
-      name: "nix-cache",
-      namespace: "homelab",
-      host: "nix.dogar.dev",
-      serviceName: "nix-cache",
+      name,
+      namespace,
+      host,
+      serviceName: name,
       servicePort: 80,
     });
   }
diff --git a/cache-infrastructure/npm/config.yaml b/cache-infrastructure/npm/config.yaml
new file mode 100644
index 0000000..e8d4ed4
--- /dev/null
+++ b/cache-infrastructure/npm/config.yaml
@@ -0,0 +1,19 @@
+storage: /verdaccio/storage
+
+uplinks:
+  npmjs:
+    url: https://registry.npmjs.org/
+
+packages:
+  "@*/*":
+    access: $all
+    publish: never
+    proxy: npmjs
+
+  "**":
+    access: $all
+    publish: never
+    proxy: npmjs
+
+log:
+  - {type: stdout, format: pretty, level: http}
diff --git a/cache-infrastructure/npm/index.ts b/cache-infrastructure/npm/index.ts
new file mode 100644
index 0000000..0d8709b
--- /dev/null
+++ b/cache-infrastructure/npm/index.ts
@@ -0,0 +1,184 @@
+import * as fs from "fs";
+import * as path from "path";
+import { Construct } from "constructs";
+import { KubernetesProvider } from "@cdktf/provider-kubernetes/lib/provider";
+import { DeploymentV1 } from "@cdktf/provider-kubernetes/lib/deployment-v1";
+import { ServiceV1 } from "@cdktf/provider-kubernetes/lib/service-v1";
+import { ConfigMapV1 } from "@cdktf/provider-kubernetes/lib/config-map-v1";
+
+import { LonghornPvc, PublicIngressRoute } from "../../utils";
+
+type NpmCacheOptions = {
+  provider: KubernetesProvider;
+  namespace: string;
+  host: string;
+  name: string;
+};
+
+export class NpmCache extends Construct {
+  constructor(scope: Construct, id: string, opts: NpmCacheOptions) {
+    super(scope, id);
+
+    const { provider, namespace, name, host } = opts;
+
+    new ConfigMapV1(this, "config", {
+      provider,
+      metadata: {
+        name,
+        namespace,
+      },
+      data: {
+        "config.yaml": fs.readFileSync(
+          path.join(__dirname, "config.yaml"),
+          "utf8",
+        ),
+      },
+    });
+
+    const pvc = new LonghornPvc(this, "pvc", {
+      provider,
+      namespace,
+      name,
+      size: "128Gi",
+      accessModes: ["ReadWriteMany"],
+    });
+
+    new ServiceV1(this, "service", {
+      provider,
+      metadata: {
+        name,
+        namespace,
+      },
+      spec: {
+        selector: {
+          app: name,
+        },
+        port: [
+          {
+            port: 4873,
+            targetPort: name,
+          },
+        ],
+        type: "ClusterIP",
+      },
+    });
+
+    new DeploymentV1(this, "deployment", {
+      provider,
+      metadata: {
+        name,
+        namespace,
+      },
+      spec: {
+        replicas: "3",
+        selector: {
+          matchLabels: {
+            app: name,
+          },
+        },
+        template: {
+          metadata: {
+            labels: {
+              app: name,
+            },
+          },
+          spec: {
+            nodeSelector: {
+              nodepool: "worker",
+            },
+            topologySpreadConstraint: [
+              {
+                maxSkew: 1,
+                topologyKey: "kubernetes.io/hostname",
+                whenUnsatisfiable: "DoNotSchedule",
+                labelSelector: [
+                  {
+                    matchLabels: {
+                      app: name,
+                    },
+                  },
+                ],
+              },
+            ],
+            affinity: {
+              podAntiAffinity: {
+                requiredDuringSchedulingIgnoredDuringExecution: [
+                  {
+                    topologyKey: "kubernetes.io/hostname",
+                    labelSelector: [
+                      {
+                        matchExpressions: [
+                          {
+                            key: "app",
+                            operator: "In",
+                            values: [name],
+                          },
+                        ],
+                      },
+                    ],
+                  },
+                ],
+              },
+            },
+            volume: [
+              {
+                name: "storage",
+                persistentVolumeClaim: {
+                  claimName: pvc.name,
+                },
+              },
+              {
+                name: "config",
+                configMap: {
+                  name,
+                },
+              },
+            ],
+            container: [
+              {
+                name,
+                image: "verdaccio/verdaccio:latest",
+                env: [
+                  {
+                    name: "VERDACCIO_APP_CONFIG",
+                    value: "/verdaccio/conf/config.yaml",
+                  },
+                  {
+                    name: "VERDACCIO_PORT",
+                    value: "4873",
+                  },
+                ],
+                port: [
+                  {
+                    name,
+                    containerPort: 4873,
+                  },
+                ],
+                volumeMount: [
+                  {
+                    name: "storage",
+                    mountPath: "/verdaccio/storage",
+                  },
+                  {
+                    name: "config",
+                    mountPath: "/verdaccio/conf/config.yaml",
+                    subPath: "config.yaml",
+                  },
+                ],
+              },
+            ],
+          },
+        },
+      },
+    });
+
+    new PublicIngressRoute(this, "ingress", {
+      provider,
+      namespace,
+      name,
+      host,
+      serviceName: name,
+      servicePort: 4873,
+    });
+  }
+}
diff --git a/cache-infrastructure/pip/manifest.yaml b/cache-infrastructure/pip/manifest.yaml
new file mode 100644
index 0000000..32d407e
--- /dev/null
+++ b/cache-infrastructure/pip/manifest.yaml
@@ -0,0 +1,135 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: devpi
+  namespace: homelab
+spec:
+  storageClassName: longhorn
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 128Gi
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: devpi
+  namespace: homelab
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: devpi
+  template:
+    metadata:
+      labels:
+        app: devpi
+    spec:
+      nodeSelector:
+        nodepool: worker
+
+      topologySpreadConstraints:
+        - maxSkew: 1
+          topologyKey: kubernetes.io/hostname
+          whenUnsatisfiable: ScheduleAnyway
+          labelSelector:
+            matchLabels:
+              app: devpi
+
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            - labelSelector:
+                matchExpressions:
+                  - key: app
+                    operator: In
+                    values:
+                      - devpi
+              topologyKey: "kubernetes.io/hostname"
+
+      containers:
+        - name: devpi
+          image: jonasal/devpi-server:latest
+          env:
+            - name: DEVPI_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: devpi-secret
+                  key: password
+          ports:
+            - containerPort: 3141
+          volumeMounts:
+            - name: data
+              mountPath: /devpi
+      volumes:
+        - name: data
+          persistentVolumeClaim:
+            claimName: devpi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: devpi
+  namespace: homelab
+spec:
+  selector:
+    app: devpi
+  ports:
+    - port: 3141
+      targetPort: 3141
+      protocol: TCP
+  type: ClusterIP
+---
+apiVersion: traefik.io/v1alpha1
+kind: Middleware
+metadata:
+  name: devpi-allowlist
+  namespace: homelab
+spec:
+  ipAllowList:
+    sourceRange:
+      - "127.0.0.1/32"
+      - "10.43.0.0/16"
+---
+apiVersion: traefik.io/v1alpha1
+kind: Middleware
+metadata:
+  name: devpi-ratelimit
+  namespace: homelab
+spec:
+  rateLimit:
+    average: 10
+    burst: 50
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: devpi
+  namespace: homelab
+  annotations:
+    nginx.ingress.kubernetes.io/proxy-body-size: "0"
+    cert-manager.io/cluster-issuer: "cloudflare-issuer"
+    cert-manager.io/acme-challenge-type: "dns01"
+    cert-manager.io/private-key-size: "4096"
+
+    # Traefik Middlewares (one resource per middleware type, referenced as <namespace>-<name>)
+    traefik.ingress.kubernetes.io/router.middlewares: "homelab-devpi-allowlist@kubernetescrd,homelab-devpi-ratelimit@kubernetescrd"
+spec:
+  ingressClassName: traefik
+  tls:
+    - hosts:
+        - pip.dogar.dev
+      secretName: devpi-tls
+  rules:
+    - host: pip.dogar.dev
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: devpi
+                port:
+                  number: 3141
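
Note on shared helpers: both cache constructs call LonghornPvc and PublicIngressRoute from ../../utils, which sit outside this diff. The only contract the patch assumes from LonghornPvc is the option set used above (provider, name, namespace, accessModes, size) plus a name property consumed as claimName: pvc.name. The sketch below shows one way such a helper could be written with the same cdktf provider bindings; it is an illustration inferred from the call sites, not the actual utils implementation, and the hard-coded "longhorn" storage class and waitUntilBound setting are assumptions.

// Hypothetical sketch of the LonghornPvc helper in ../../utils, inferred from how this patch calls it.
import { Construct } from "constructs";
import { KubernetesProvider } from "@cdktf/provider-kubernetes/lib/provider";
import { PersistentVolumeClaimV1 } from "@cdktf/provider-kubernetes/lib/persistent-volume-claim-v1";

type LonghornPvcOptions = {
  provider: KubernetesProvider;
  name: string;
  namespace: string;
  accessModes: string[];
  size: string;
};

export class LonghornPvc extends Construct {
  // Exposed so callers can wire the claim into pod volumes (claimName: pvc.name).
  readonly name: string;

  constructor(scope: Construct, id: string, options: LonghornPvcOptions) {
    super(scope, id);

    const { provider, name, namespace, accessModes, size } = options;
    this.name = name;

    new PersistentVolumeClaimV1(this, "pvc", {
      provider,
      metadata: {
        name,
        namespace,
      },
      spec: {
        // Assumed Longhorn storage class, matching the storageClassName in pip/manifest.yaml.
        storageClassName: "longhorn",
        accessModes,
        resources: {
          requests: {
            storage: size,
          },
        },
      },
      // Assumption: don't block the apply on the RWX volume binding.
      waitUntilBound: false,
    });
  }
}

Once the stack is applied, a client can be pointed at the cache with `npm config set registry https://npm.dogar.dev/` (or a per-project .npmrc); per config.yaml, Verdaccio proxies misses to registry.npmjs.org and keeps publishing disabled (publish: never).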