---
# Shared cluster secret and the bootstrap peer's libp2p private key.
# NOTE(review): committing secrets to VCS is unsafe — these values should be
# rotated and sourced from a secret store. Values are base64, as the Secret
# `data` field requires.
apiVersion: v1
kind: Secret
metadata:
  name: secret-config
type: Opaque
data:
  cluster-secret: Y2U1MTlkMzFmN2Y4Y2ZkMjBlODA1MDZkZmVkM2NjZTVmZjZhYzQ0ODlhZWRhYTJhYzIxMjliOGQ0ZTkwNTkyNQ==
  bootstrap-peer-priv-key: Q0FBU3Bna3dnZ1NpQWdFQUFvSUJBUURSUlIyd0pPdjJQdEJNakZhSkE0elFDV1Q1cEdhSkJkNk5wOGVFWHMwSW9qRDNhYzVpeW1LTHpoWEdZSCtnKzRKaEtHeXZXWmg1MFk4bFhHRVk1RE9maFVHTUNCbzlJdVlZNHhRK1RxM3ZVUDdwOVBLOWhTRWJlNzFFRFA3ZG5yY1Y4ZCtHVnBaTHNlTGlFVFJQeCtvK0p6T05pQ0NmVmZ5NHFJU0c1NTV1QjBxbDVjamltUHBCS0NTTHF0VkpXbXJiMTViSUR3TWZQOXlFUVFMYm1PeWpoSVN4M0NsR3VyMEtKOGxKek1tcnNUUjNoWUkyTCtXRjlMMGd4UUcyN0FzTThwYjkrSExtYW1GTU9ZUHdaSjkzK0NJMmZKYlhzZEdmZTBaaGJEeVV0LzVmeWhxOGlLcmhhd2pQUElwZnZ6OW5PbDFyNTlVTWJaQmZRNGFiZVg2REFnTUJBQUVDZ2dFQUFTU2xndDdRd3ltWXJiSmVqcVVNVWQ2YlJKblhUWXhRcU9sdnQ1TmsvMVVUZTJhQ2tCdVc2amlYK2R0eEZ3b0lya3N3UE16NmZqL2N6bEZ3M2xmd3lFQk1HVjI1OXZlSDFqNzNUMWZTcjJuNzJuOVNEQ0w4Q1ZCbHVrZHY1UG9HK2dHMlBOK2dObzJrTmFQdWF6a21XR3lTWllXRzJCZjZKM0FVT1VCdXB4ODBpM0VPZDgwRGZFRlZYYTg5TjBVdkFkTDFNbUdJMUpDZXcrZUF0ajR4RzkraDd1K3ZXSDJJamFOQUp1YTkwbUg5bVBTaHR3MTVFOFlnVG5LZkhsQjB5OUNpd3Zqb3ZpSDlxUlhHS3lyMXpvNnJsSEdzeVZKR0FubzNVbVArSnRZdzNaOExKUU1MbFRkcmZZVkpCMnU1RU5lcFZHSGJ1MDhDQXZubElXUHhRUUtCZ1FEZm9vbFNhYm9xa3MvcnhpUVBSL2pDVkhDRzB3NnQyaVJ4QTlSM2t3czA1dFdqVXZENmliZTVPUzVyUFZjaTFNRVM0ME5KdjNnWWwxNjA4S2dWL2N1Wm4zaStjMUpXOTR0alhLOEkvbk9zKzUwWlA5UWE2enZNTHhrem9ZQjN2aVZPOGhYNWw3clRhSlUwZ0RxeDh6OHVCVGZFa01oOFYxS0Q5a2kvdStHbjhRS0JnUUR2amw5alJGTXFUZkFYSmxoZFBTbXU5WXp5WlhZcnVWZjNTSFN2T0N6dUNXSEtGTktyQ1p5NkNsSXdMTGJzSFNaOTNRaUl4Mk5UNkFFOHNaZzV0cElQKzNnQTNkV0NLUHUrUFN5ZmRMV2RCVTkrM3VEUGwxSyswdWM0N2kwck41UlVDRG9FZ0RMMVVZRHhxMm5heDM1S0wyOHFzNmlQcGtad2lJS1JDYnNoc3dLQmdGbVlOSXZmNzhXK3E2NDU0NWg3clIydDFxY3RSMlEvVVBvMkhwa0pRQ1FobHRXb0ZRTkt5V0JibUUwK2RmcHVZaXUvQU9ZNjMxSjYwbWFMUTh3THNkWUtIY1d2VGYycGp4NXZzM0JYQU9EOHJTUEFmcW1LTkpySkE3KzdwRXhVMFgyZlgwNXZpMW5IUEgrY0grZkxIRFREd21QdDNXUnpWNHVtejk2QTF4Y0JBb0dBRGV4SGJQeU9uTm5VeDNPcUZSazE3LzVEaDk4UHhERE1oUW5JQmxzTWVYVkFJcitkRXVYSEh5VDBhK3BkWTFHNloveE52NjFlLzRSdmRmaVljVnBFR1B1em1ORkwxS0G5QVDlvOFdyUXN6Z2g5KzFDMjNqbmNURXp6dFdKYWE3V2FRRndPQ3k3c1IwZktaOEZmc0tDQm5Zd3VPTndpUXQ4V0h2cEFKRy9kV1hNQ2dZQkF3aTVsaU1zNy9ZVWF3R1ZaVFNGOEVVZkg3MXV5Y285WGlyU24ycmtKUS8wZnJuSFF2MWIxN050NVkyTk8xSHNnK1hjNndjMkg3bHpxYzBnQlNVUitPWGhBbUdlYUZXMkx1LzNaM3ZibWNZZWRHTFRBbDJCSUZ2Y3NOZXRBcFdMcmNIR1YybEtPWGg4Q3FuLzBTSDB1Mzd1WEZFYVI1cG9MbkRoNWtQbFRsUT09Cg==
---
# Non-secret configuration: the bootstrap peer's ID, consumed by the
# ipfs-cluster container in the StatefulSet below.
apiVersion: v1
kind: ConfigMap
metadata:
  name: env-config
data:
  bootstrap-peer-id: QmWppm9fwYPcNbWf2kxmdBxdfBXBvjGRADrYC2ohvQhy6T
---
# Headless-style service fronting all cluster pods; targetPort values refer to
# the named container ports declared in the StatefulSet.
apiVersion: v1
kind: Service
metadata:
  name: ipfs-cluster
  labels:
    app: ipfs-cluster
spec:
  ports:
    - name: swarm
      targetPort: swarm
      port: 4001
    - name: swarm-udp
      targetPort: swarm-udp
      port: 4002
    - name: ws
      targetPort: ws
      port: 8081
    - name: http
      targetPort: http
      port: 8080
    - name: api-http
      targetPort: api-http
      port: 9094
    - name: proxy-http
      targetPort: proxy-http
      port: 9095
    - name: cluster-swarm
      targetPort: cluster-swarm
      port: 9096
  selector:
    app: ipfs-cluster
---
# Three-replica IPFS + ipfs-cluster deployment. Pod ipfs-cluster-0 acts as the
# bootstrap node; the others dial it via its stable StatefulSet DNS name.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: ipfs-cluster
spec:
  serviceName: ipfs-cluster
  replicas: 3
  selector:
    matchLabels:
      app: ipfs-cluster
  template:
    metadata:
      labels:
        app: ipfs-cluster
    spec:
      initContainers:
        # Prepares the IPFS repo (profiles, limits) before the main containers
        # start; the script is mounted from the configmap below.
        # NOTE(review): `latest` tags are not reproducible — consider pinning.
        - name: configure-ipfs
          image: "ipfs/go-ipfs:latest"
          command: ["sh", "/custom/configure-ipfs.sh"]
          volumeMounts:
            - name: ipfs-storage
              mountPath: /data/ipfs
            - name: configure-script
              mountPath: /custom
      containers:
        - name: ipfs
          image: "ipfs/go-ipfs:cluster-k8s"
          imagePullPolicy: IfNotPresent
          env:
            - name: IPFS_FD_MAX
              value: "4096"
          ports:
            - name: swarm
              protocol: TCP
              containerPort: 4001
            - name: swarm-udp
              protocol: UDP
              containerPort: 4002
            - name: api
              protocol: TCP
              containerPort: 5001
            - name: ws
              protocol: TCP
              containerPort: 8081
            - name: http
              protocol: TCP
              containerPort: 8080
          livenessProbe:
            tcpSocket:
              port: swarm
            initialDelaySeconds: 30
            timeoutSeconds: 5
            periodSeconds: 15
          volumeMounts:
            - name: ipfs-storage
              mountPath: /data/ipfs
            - name: configure-script
              mountPath: /custom
          resources: {}
        - name: ipfs-cluster
          image: "ipfs/ipfs-cluster:latest"
          command: ["sh", "/custom/cluster-entrypoint.sh"]
          env:
            # FIX(review): the original used `envFrom` on env-config, but its
            # key `bootstrap-peer-id` is not a valid C_IDENTIFIER, so
            # Kubernetes skips it and ${BOOTSTRAP_PEER_ID} was never set for
            # the entrypoint script. Map it explicitly instead.
            - name: BOOTSTRAP_PEER_ID
              valueFrom:
                configMapKeyRef:
                  name: env-config
                  key: bootstrap-peer-id
            - name: BOOTSTRAP_PEER_PRIV_KEY
              valueFrom:
                secretKeyRef:
                  name: secret-config
                  key: bootstrap-peer-priv-key
            - name: CLUSTER_SECRET
              valueFrom:
                secretKeyRef:
                  name: secret-config
                  key: cluster-secret
            - name: CLUSTER_MONITOR_PING_INTERVAL
              value: "3m"
            # FIX(review): was `$(CLUSTER_SVC_NAME)`, which is defined nowhere
            # in this file — dependent expansion would stay literal and the
            # bootstrap multiaddr would be garbage. Use the Service name.
            - name: SVC_NAME
              value: "ipfs-cluster"
          ports:
            - name: api-http
              protocol: TCP
              containerPort: 9094
            - name: proxy-http
              protocol: TCP
              containerPort: 9095
            - name: cluster-swarm
              protocol: TCP
              containerPort: 9096
          livenessProbe:
            tcpSocket:
              port: cluster-swarm
            initialDelaySeconds: 5
            timeoutSeconds: 5
            periodSeconds: 10
          volumeMounts:
            - name: cluster-storage
              mountPath: /data/ipfs-cluster
            - name: configure-script
              mountPath: /custom
          resources: {}
      volumes:
        - name: configure-script
          configMap:
            name: ipfs-cluster-set-bootstrap-conf
  volumeClaimTemplates:
    # FIX(review): dropped `persistentVolumeReclaimPolicy: Retain` from both
    # claim specs — that is a PersistentVolume field, not valid in a PVC spec,
    # and is rejected under strict API validation. Set the reclaim policy on
    # the StorageClass / PV instead.
    - metadata:
        name: cluster-storage
      spec:
        storageClassName: do-block-storage
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 5Gi
    - metadata:
        name: ipfs-storage
      spec:
        storageClassName: do-block-storage
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 100Gi
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: ipfs-cluster-set-bootstrap-conf
data:
  cluster-entrypoint.sh: |
    #!/bin/sh
    user=ipfs
    # This is a custom entrypoint for k8s designed to connect to the bootstrap
    # node running in the cluster. It has been set up using a configmap to
    # allow changes on the fly.
    if [ ! -f /data/ipfs-cluster/service.json ]; then
      ipfs-cluster-service init -l debug
    fi
    PEER_HOSTNAME=`cat /proc/sys/kernel/hostname`
    grep -q ".*ipfs-cluster-0.*" /proc/sys/kernel/hostname
    if [ $? -eq 0 ]; then
      # Bootstrap node: run standalone under its pre-provisioned identity.
      CLUSTER_ID=${BOOTSTRAP_PEER_ID} \
      CLUSTER_PRIVATEKEY=${BOOTSTRAP_PEER_PRIV_KEY} \
      exec ipfs-cluster-service daemon --upgrade --leave
    else
      # FIX(review): guard on the peer id itself. The original checked
      # `-z $BOOTSTRAP_ADDR` AFTER assigning it a non-empty template string,
      # so that check could never fire and a missing peer id produced a
      # malformed multiaddr instead of a clean failure.
      if [ -z "${BOOTSTRAP_PEER_ID}" ]; then
        exit 1
      fi
      BOOTSTRAP_ADDR=/dns4/${SVC_NAME}-0.${SVC_NAME}/tcp/9096/ipfs/${BOOTSTRAP_PEER_ID}
      # Only ipfs user can get here
      exec ipfs-cluster-service daemon --upgrade --bootstrap $BOOTSTRAP_ADDR --leave
    fi
  configure-ipfs.sh: |
    #!/bin/sh
    set -x
    # This is a custom entrypoint for k8s designed to run ipfs nodes in an
    # appropriate setup for production scenarios.
    # NOTE(review): `ipfs init` fails harmlessly when the repo already exists
    # (pod restart); the config commands below still run, and `exit 0` keeps
    # the init container from blocking the pod either way.
    if [ -f /data/ipfs/repo.lock ]; then
      rm /data/ipfs/repo.lock
    fi
    ipfs init --profile="server,badgerds"
    ipfs config Datastore.StorageMax 80GB
    ipfs config --json Swarm.ConnMgr.HighWater 2000
    ipfs config --json Datastore.BloomFilterSize 1048576
    chown -R ipfs /data/ipfs
    exit 0