
fix: update image output handling in release.yml (#391)

* feat: add install script and update image handling in Kubefile

* fix: add charts for aiproxy

* fix: update ingress path for aiproxy service

* feat: add Sealos AI Proxy admin configuration to app.yaml

* fix: update adminKey retrieval logic in install script

* fix: reorder deletion of aiproxy resources in install script

* fix: simplify image list preparation in release.yml

* fix: update image caching process in release.yml
cuisongliu 2 months ago
parent
commit
5aa3862294
28 files changed, 920 additions and 552 deletions
  1. + 44 - 0  .github/scripts/install.sh
  2. + 86 - 1  .github/workflows/release.yml
  3. + 6 - 21  core/deploy/Kubefile
  4. + 23 - 0  core/deploy/charts/aiproxy-database/.helmignore
  5. + 24 - 0  core/deploy/charts/aiproxy-database/Chart.yaml
  6. + 0 - 0  core/deploy/charts/aiproxy-database/templates/NOTES.txt
  7. + 58 - 0  core/deploy/charts/aiproxy-database/templates/_helpers.tpl
  8. + 86 - 0  core/deploy/charts/aiproxy-database/templates/aiproxy-log.yaml
  9. + 104 - 0  core/deploy/charts/aiproxy-database/templates/aiproxy-redis.yaml
  10. + 86 - 0  core/deploy/charts/aiproxy-database/templates/aiproxy.yaml
  11. + 55 - 0  core/deploy/charts/aiproxy-database/values.yaml
  12. + 23 - 0  core/deploy/charts/aiproxy/.helmignore
  13. + 24 - 0  core/deploy/charts/aiproxy/Chart.yaml
  14. + 0 - 0  core/deploy/charts/aiproxy/templates/NOTES.txt
  15. + 58 - 0  core/deploy/charts/aiproxy/templates/_helpers.tpl
  16. + 21 - 0  core/deploy/charts/aiproxy/templates/app.yaml
  17. + 8 - 0  core/deploy/charts/aiproxy/templates/configmap.yaml
  18. + 65 - 0  core/deploy/charts/aiproxy/templates/deploy.yaml
  19. + 39 - 0  core/deploy/charts/aiproxy/templates/ingress.yaml
  20. + 31 - 0  core/deploy/charts/aiproxy/values.yaml
  21. + 79 - 0  core/deploy/install.sh
  22. + 0 - 17  core/deploy/manifests/aiproxy-config.yaml.tmpl
  23. + 0 - 70  core/deploy/manifests/deploy.yaml.tmpl
  24. + 0 - 37  core/deploy/manifests/ingress.yaml.tmpl
  25. + 0 - 94  core/deploy/manifests/pgsql-log.yaml
  26. + 0 - 94  core/deploy/manifests/pgsql.yaml
  27. + 0 - 107  core/deploy/manifests/redis.yaml
  28. + 0 - 111  core/deploy/scripts/init.sh

+ 44 - 0
.github/scripts/install.sh

@@ -0,0 +1,44 @@
+#!/bin/bash
+set -e
+timestamp() {
+  date +"%Y-%m-%d %T"
+}
+
+error() {
+  flag=$(timestamp)
+  echo -e "\033[31m ERROR [$flag] >> $* \033[0m"
+  exit 1
+}
+
+logger() {
+  flag=$(timestamp)
+  echo -e "\033[36m INFO [$flag] >> $* \033[0m"
+}
+
+warn() {
+  flag=$(timestamp)
+  echo -e "\033[33m WARN [$flag] >> $* \033[0m"
+}
+
+debug() {
+  flag=$(timestamp)
+  echo -e "\033[32m DEBUG [$flag] >> $* \033[0m"
+}
+
+check_file_exits() {
+  for f; do
+    if [[ -f $f ]]; then
+      logger "The machine $f is installed"
+      exit 0
+    fi
+  done
+}
+
+check_file_exits /usr/bin/sealos
+
+pushd "$(mktemp -d)" >/dev/null || exit
+  until curl -sLo "sealos.tar.gz"  "https://github.com/labring/sealos/releases/download/v5.1.0-beta3/sealos_5.1.0-beta3_linux_amd64.tar.gz"; do sleep 3; done
+  tar -zxf sealos.tar.gz sealos &&  chmod +x sealos && mv sealos /usr/bin
+  rm -rf sealos.tar.gz
+  sealos version
+popd >/dev/null

+ 86 - 1
.github/workflows/release.yml

@@ -20,7 +20,7 @@ env:
   DOCKERHUB_REPO: ${{ secrets.DOCKERHUB_REPO != '' && secrets.DOCKERHUB_REPO || secrets.DOCKERHUB_USERNAME != '' && format('{0}/{1}', secrets.DOCKERHUB_USERNAME, 'aiproxy') || '' }}
   GHCR_REPO: ghcr.io/${{ github.repository }}
   ALIYUN_REGISTRY: ${{ secrets.ALIYUN_REGISTRY }}
-  ALIYUN_REPO: ${{ secrets.ALIYUN_REPO != '' && secrets.ALIYUN_REPO || secrets.ALIYUN_USERNAME != '' && format('{0}/{1}/{2}', secrets.ALIYUN_REGISTRY, secrets.ALIYUN_USERNAME, 'aiproxy') || '' }}
+  ALIYUN_REPO: ${{ secrets.ALIYUN_REPO != '' && secrets.ALIYUN_REPO || (secrets.ALIYUN_REGISTRY != '' && secrets.ALIYUN_USERNAME != '') && format('{0}/{1}/{2}', secrets.ALIYUN_REGISTRY, secrets.ALIYUN_USERNAME, 'aiproxy') || '' }}
 
 jobs:
   release-web:
@@ -282,3 +282,88 @@ jobs:
       - name: Inspect image
         run: |
           docker buildx imagetools inspect ${{ env.GHCR_REPO }}:${{ steps.meta.outputs.version }}
+  release-sealos-images:
+    name: Push Sealos Images
+    permissions:
+      packages: write
+    needs: release-docker-images
+    runs-on: ubuntu-24.04
+    if: ${{ github.event_name != 'pull_request' && github.actor != 'dependabot[bot]' }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v5
+      - name: install cache images tools
+        run: |
+          sudo bash ./.github/scripts/install.sh
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+      - uses: docker/setup-buildx-action@v3
+      - name: Prepare cluster image list
+        id: cluster_image_targets
+        run: |
+          set -euo pipefail
+          images=("${GHCR_REPO}-cluster")
+          if [ -n "${DOCKERHUB_REPO}" ]; then
+            images+=("${DOCKERHUB_REPO}-cluster")
+          fi
+          if [ -n "${ALIYUN_REPO}" ]; then
+            images+=("${ALIYUN_REPO}-cluster")
+          fi
+
+          {
+            echo "images<<EOF"
+            printf '%s\n' "${images[@]}"
+            echo "EOF"
+            csv=$(IFS=','; printf '%s' "${images[*]}")
+            echo "names=${csv}"
+          } >> "${GITHUB_OUTPUT}"
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: ${{ steps.cluster_image_targets.outputs.images }}
+          tags: |
+            type=ref,event=branch
+            type=ref,event=pr
+            type=ref,event=tag
+            type=semver,pattern={{version}}
+            type=semver,pattern={{major}}.{{minor}}
+            type=semver,pattern={{major}}
+            type=sha
+      - name: cache images
+        working-directory: core/deploy
+        run: |
+          sudo sealos login -u "${{ github.repository_owner }}" -p "${{ secrets.GITHUB_TOKEN }}" ghcr.io
+          sed -i "s#image: ghcr.io/labring/aiproxy:latest#image: ${{ env.GHCR_REPO }}:${{ steps.meta.outputs.version }}#g" charts/aiproxy/values.yaml
+          sudo sealos registry save --registry-dir=registry_amd64 --arch amd64 .
+          sudo sealos registry save --registry-dir=registry_arm64 --arch arm64 .
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Login to DockerHub
+        uses: docker/login-action@v3
+        if: ${{ env.DOCKERHUB_REPO }}
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Login to Aliyun Registry
+        uses: docker/login-action@v3
+        if: ${{ env.ALIYUN_REGISTRY }}
+        with:
+          registry: ${{ env.ALIYUN_REGISTRY }}
+          username: ${{ secrets.ALIYUN_USERNAME }}
+          password: ${{ secrets.ALIYUN_PASSWORD }}
+      - name: Build images
+        uses: docker/build-push-action@v6
+        with:
+          context: ./core/deploy
+          file: ./core/deploy/Kubefile
+          labels: ${{ steps.meta.outputs.labels }}
+          platforms: linux/amd64,linux/arm64
+          push: ${{ github.event_name != 'pull_request' }}
+          tags: ${{ steps.meta.outputs.tags }}
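
Note on the new "Prepare cluster image list" step: it emits a newline-separated images output (consumed by docker/metadata-action) plus a comma-separated names output. A quick local sketch of the same string handling, assuming only the GHCR repository is configured and using a placeholder value:

  GHCR_REPO=ghcr.io/labring/aiproxy DOCKERHUB_REPO="" ALIYUN_REPO=""
  images=("${GHCR_REPO}-cluster")
  [ -n "${DOCKERHUB_REPO}" ] && images+=("${DOCKERHUB_REPO}-cluster")
  [ -n "${ALIYUN_REPO}" ] && images+=("${ALIYUN_REPO}-cluster")
  printf '%s\n' "${images[@]}"                # newline-separated list for metadata-action
  (IFS=','; printf '%s\n' "${images[*]}")     # comma-separated "names" variant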

+ 6 - 21
core/deploy/Kubefile

@@ -1,23 +1,8 @@
-FROM scratch
-COPY registry registry
-COPY manifests manifests
-COPY scripts scripts
+FROM --platform=$BUILDPLATFORM scratch
+ARG TARGETARCH
+COPY registry_${TARGETARCH} registry
 
-ENV cloudDomain="127.0.0.1.nip.io"
-ENV cloudPort=""
-ENV certSecretName="wildcard-cert"
+COPY install.sh install.sh
+COPY charts charts
 
-ENV ADMIN_KEY=""
-ENV SEALOS_JWT_KEY="<sealos-jwt-key-placeholder>"
-ENV SQL_DSN="<sql-placeholder>"
-ENV LOG_SQL_DSN="<sql-log-placeholder>"
-ENV REDIS="<redis-placeholder>"
-
-ENV BALANCE_SEALOS_CHECK_REAL_NAME_ENABLE="false"
-ENV BALANCE_SEALOS_NO_REAL_NAME_USED_AMOUNT_LIMIT="1"
-
-ENV SAVE_ALL_LOG_DETAIL="false"
-ENV LOG_DETAIL_REQUEST_BODY_MAX_SIZE="128"
-ENV LOG_DETAIL_RESPONSE_BODY_MAX_SIZE="128"
-
-CMD ["bash scripts/init.sh"]
+CMD ["bash install.sh"]

+ 23 - 0
core/deploy/charts/aiproxy-database/.helmignore

@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/

+ 24 - 0
core/deploy/charts/aiproxy-database/Chart.yaml

@@ -0,0 +1,24 @@
+apiVersion: v2
+name: aiproxy-database
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "1.16.0"

+ 0 - 0
core/deploy/charts/aiproxy-database/templates/NOTES.txt


+ 58 - 0
core/deploy/charts/aiproxy-database/templates/_helpers.tpl

@@ -0,0 +1,58 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "aiproxy-database.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "aiproxy-database.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "aiproxy-database.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "aiproxy-database.labels" -}}
+helm.sh/chart: {{ include "aiproxy-database.chart" . }}
+{{ include "aiproxy-database.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "aiproxy-database.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "aiproxy-database.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "aiproxy-database.serviceAccountName" -}}
+{{- default (include "aiproxy-database.fullname" .) .Values.serviceAccount.name }}
+{{- end }}

+ 86 - 0
core/deploy/charts/aiproxy-database/templates/aiproxy-log.yaml

@@ -0,0 +1,86 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    sealos-db-provider-cr: {{.Values.pgsqlLog.name}}
+    app.kubernetes.io/instance: {{.Values.pgsqlLog.name}}
+    app.kubernetes.io/managed-by: kbcli
+  name: {{.Values.pgsqlLog.name}}
+  namespace: {{.Release.Namespace}}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  labels:
+    sealos-db-provider-cr: {{.Values.pgsqlLog.name}}
+    app.kubernetes.io/instance: {{.Values.pgsqlLog.name}}
+    app.kubernetes.io/managed-by: kbcli
+  name: {{.Values.pgsqlLog.name}}
+  namespace: {{.Release.Namespace}}
+rules:
+  - apiGroups:
+      - '*'
+    resources:
+      - '*'
+    verbs:
+      - '*'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    sealos-db-provider-cr: {{.Values.pgsqlLog.name}}
+    app.kubernetes.io/instance: {{.Values.pgsqlLog.name}}
+    app.kubernetes.io/managed-by: kbcli
+  name: {{.Values.pgsqlLog.name}}
+  namespace: {{.Release.Namespace}}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{.Values.pgsqlLog.name}}
+subjects:
+  - kind: ServiceAccount
+    name: {{.Values.pgsqlLog.name}}
+    namespace: {{.Release.Namespace}}
+---
+apiVersion: apps.kubeblocks.io/v1alpha1
+kind: Cluster
+metadata:
+  labels:
+    clusterdefinition.kubeblocks.io/name: postgresql
+    clusterversion.kubeblocks.io/name: {{ .Values.pgsqlLog.version }}
+  name: {{.Values.pgsqlLog.name}}
+  namespace: {{.Release.Namespace}}
+spec:
+  affinity:
+    nodeLabels: {}
+    podAntiAffinity: Preferred
+    tenancy: SharedNode
+    topologyKeys:
+      - kubernetes.io/hostname
+  backup:
+    cronExpression: {{.Values.pgsqlLog.backup.cron}}
+    enabled: true
+    method: dump
+    pitrEnabled: true
+    retentionPeriod: {{.Values.pgsqlLog.backup.retentionPeriod}}
+  clusterDefinitionRef: postgresql
+  clusterVersionRef: {{ .Values.pgsqlLog.version }}
+  componentSpecs:
+    - componentDefRef: postgresql
+      monitor: true
+      name: postgresql
+      replicas: {{ .Values.pgsqlLog.replicas }}
+      resources: {{ toYaml .Values.pgsqlLog.resources | nindent 8 }}
+      serviceAccountName: {{.Values.pgsqlLog.name}}
+      switchPolicy:
+        type: Noop
+      volumeClaimTemplates:
+        - name: data
+          spec:
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: {{ .Values.pgsqlLog.storage }}
+  terminationPolicy: Delete

+ 104 - 0
core/deploy/charts/aiproxy-database/templates/aiproxy-redis.yaml

@@ -0,0 +1,104 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    sealos-db-provider-cr: {{.Values.redis.name}}
+    app.kubernetes.io/instance: {{.Values.redis.name}}
+    app.kubernetes.io/managed-by: kbcli
+  name: {{.Values.redis.name}}
+  namespace: {{.Release.Namespace}}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  labels:
+    sealos-db-provider-cr: {{.Values.redis.name}}
+    app.kubernetes.io/instance: {{.Values.redis.name}}
+    app.kubernetes.io/managed-by: kbcli
+  name: {{.Values.redis.name}}
+  namespace: {{.Release.Namespace}}
+rules:
+  - apiGroups:
+      - '*'
+    resources:
+      - '*'
+    verbs:
+      - '*'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    sealos-db-provider-cr: {{.Values.redis.name}}
+    app.kubernetes.io/instance: {{.Values.redis.name}}
+    app.kubernetes.io/managed-by: kbcli
+  name: {{.Values.redis.name}}
+  namespace: {{.Release.Namespace}}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{.Values.redis.name}}
+subjects:
+  - kind: ServiceAccount
+    name: {{.Values.redis.name}}
+    namespace: {{.Release.Namespace}}
+---
+apiVersion: apps.kubeblocks.io/v1alpha1
+kind: Cluster
+metadata:
+  labels:
+    clusterdefinition.kubeblocks.io/name: redis
+    clusterversion.kubeblocks.io/name: {{ .Values.redis.version }}
+  name: {{.Values.redis.name}}
+  namespace: {{.Release.Namespace}}
+spec:
+  affinity:
+    nodeLabels: {}
+    podAntiAffinity: Preferred
+    tenancy: SharedNode
+    topologyKeys:
+      - kubernetes.io/hostname
+  backup:
+    cronExpression: {{.Values.redis.backup.cron}}
+    enabled: true
+    method: dump
+    pitrEnabled: true
+    retentionPeriod: {{.Values.redis.backup.retentionPeriod}}
+  clusterDefinitionRef: redis
+  clusterVersionRef: {{ .Values.redis.version }}
+  componentSpecs:
+    - componentDefRef: redis
+      monitor: true
+      name: redis
+      replicas: {{ .Values.redis.replicas }}
+      resources: {{ toYaml .Values.redis.resources | nindent 8 }}
+      serviceAccountName: {{.Values.redis.name}}
+      switchPolicy:
+        type: Noop
+      volumeClaimTemplates:
+        - name: data
+          spec:
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: {{ .Values.redis.storage }}
+    {{- if gt (int .Values.redis.sentinelReplicas) 0 }}
+    - componentDefRef: redis-sentinel
+      monitor: true
+      name: redis-sentinel
+      replicas: {{ .Values.redis.sentinelReplicas }}
+      resources: {{ toYaml .Values.redis.sentinelResources | nindent 8 }}
+      serviceAccountName: {{.Values.redis.name}}
+      switchPolicy:
+        type: Noop
+      volumeClaimTemplates:
+        - name: data
+          spec:
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: {{ .Values.redis.storage }}
+    {{- end }}
+  terminationPolicy: Delete

+ 86 - 0
core/deploy/charts/aiproxy-database/templates/aiproxy.yaml

@@ -0,0 +1,86 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    sealos-db-provider-cr: {{.Values.pgsql.name}}
+    app.kubernetes.io/instance: {{.Values.pgsql.name}}
+    app.kubernetes.io/managed-by: kbcli
+  name: {{.Values.pgsql.name}}
+  namespace: {{.Release.Namespace}}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  labels:
+    sealos-db-provider-cr: {{.Values.pgsql.name}}
+    app.kubernetes.io/instance: {{.Values.pgsql.name}}
+    app.kubernetes.io/managed-by: kbcli
+  name: {{.Values.pgsql.name}}
+  namespace: {{.Release.Namespace}}
+rules:
+  - apiGroups:
+      - '*'
+    resources:
+      - '*'
+    verbs:
+      - '*'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    sealos-db-provider-cr: {{.Values.pgsql.name}}
+    app.kubernetes.io/instance: {{.Values.pgsql.name}}
+    app.kubernetes.io/managed-by: kbcli
+  name: {{.Values.pgsql.name}}
+  namespace: {{.Release.Namespace}}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{.Values.pgsql.name}}
+subjects:
+  - kind: ServiceAccount
+    name: {{.Values.pgsql.name}}
+    namespace: {{.Release.Namespace}}
+---
+apiVersion: apps.kubeblocks.io/v1alpha1
+kind: Cluster
+metadata:
+  labels:
+    clusterdefinition.kubeblocks.io/name: postgresql
+    clusterversion.kubeblocks.io/name: {{ .Values.pgsql.version }}
+  name: {{.Values.pgsql.name}}
+  namespace: {{.Release.Namespace}}
+spec:
+  affinity:
+    nodeLabels: { }
+    podAntiAffinity: Preferred
+    tenancy: SharedNode
+    topologyKeys:
+      - kubernetes.io/hostname
+  backup:
+    cronExpression: {{.Values.pgsql.backup.cron}}
+    enabled: true
+    method: dump
+    pitrEnabled: true
+    retentionPeriod: {{.Values.pgsql.backup.retentionPeriod}}
+  clusterDefinitionRef: postgresql
+  clusterVersionRef: {{ .Values.pgsql.version }}
+  componentSpecs:
+    - componentDefRef: postgresql
+      monitor: true
+      name: postgresql
+      replicas: {{ .Values.pgsql.replicas }}
+      resources: {{ toYaml .Values.pgsql.resources | nindent 8 }}
+      serviceAccountName: {{.Values.pgsql.name}}
+      switchPolicy:
+        type: Noop
+      volumeClaimTemplates:
+        - name: data
+          spec:
+            accessModes:
+              - ReadWriteOnce
+            resources:
+              requests:
+                storage: {{ .Values.pgsql.storage }}
+  terminationPolicy: Delete

+ 55 - 0
core/deploy/charts/aiproxy-database/values.yaml

@@ -0,0 +1,55 @@
+pgsql:
+  name: aiproxy
+  backup:
+    cron: "0 7 * * *"
+    retentionPeriod: 7d
+  version: "postgresql-14.8.0"
+  replicas: 2
+  storage: 3Gi
+  resources:
+    limits:
+      cpu: 1000m
+      memory: 1024Mi
+    requests:
+      cpu: 10m
+      memory: 50Mi
+
+pgsqlLog:
+  name: aiproxy-log
+  backup:
+    cron: "0 7 * * *"
+    retentionPeriod: 7d
+  version: "postgresql-14.8.0"
+  replicas: 2
+  storage: 3Gi
+  resources:
+    limits:
+      cpu: 1000m
+      memory: 1024Mi
+    requests:
+      cpu: 10m
+      memory: 50Mi
+
+redis:
+  name: aiproxy-redis
+  backup:
+    cron: "0 7 * * *"
+    retentionPeriod: 7d
+  version: "redis-7.0.6"
+  replicas: 3
+  sentinelReplicas: 3
+  storage: 3Gi
+  sentinelResources:
+    limits:
+      cpu: 100m
+      memory: 100Mi
+    requests:
+      cpu: 100m
+      memory: 100Mi
+  resources:
+    limits:
+      cpu: 1000m
+      memory: 1024Mi
+    requests:
+      cpu: 10m
+      memory: 52Mi
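
On a single-node cluster, the new core/deploy/install.sh (further down in this diff) scales these components down automatically; the equivalent manual invocation, mirroring the script's REPLICA_OPTIONS, would look like this (shown for illustration only):

  helm upgrade -i aiproxy-database charts/aiproxy-database -n aiproxy-system \
    --set pgsql.replicas=1 --set pgsqlLog.replicas=1 \
    --set redis.replicas=1 --set redis.sentinelReplicas=0 \
    --wait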

+ 23 - 0
core/deploy/charts/aiproxy/.helmignore

@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/

+ 24 - 0
core/deploy/charts/aiproxy/Chart.yaml

@@ -0,0 +1,24 @@
+apiVersion: v2
+name: aiproxy
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "1.16.0"

+ 0 - 0
core/deploy/charts/aiproxy/templates/NOTES.txt


+ 58 - 0
core/deploy/charts/aiproxy/templates/_helpers.tpl

@@ -0,0 +1,58 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "aiproxy.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "aiproxy.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "aiproxy.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "aiproxy.labels" -}}
+helm.sh/chart: {{ include "aiproxy.chart" . }}
+{{ include "aiproxy.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "aiproxy.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "aiproxy.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "aiproxy.serviceAccountName" -}}
+{{- default (include "aiproxy.fullname" .) .Values.serviceAccount.name }}
+{{- end }}

+ 21 - 0
core/deploy/charts/aiproxy/templates/app.yaml

@@ -0,0 +1,21 @@
+{{- if  .Values.sealos }}
+---
+kind: App
+metadata:
+  name: aiproxy
+  namespace: ns-admin
+spec:
+  data:
+    desc: sealos cloud aiproxy admin
+    url: https://aiproxy.{{.Values.cloudDomain}}
+  displayType: normal
+  i18n:
+    zh:
+      name: AI Proxy
+    zh-Hans:
+      name: AI Proxy
+  icon: https://aiproxy.{{.Values.cloudDomain}}/logo.svg
+  name: Sealos AI Proxy Admin
+  type: iframe
+apiVersion: app.sealos.io/v1
+{{- end }}

+ 8 - 0
core/deploy/charts/aiproxy/templates/configmap.yaml

@@ -0,0 +1,8 @@
+apiVersion: v1
+data: {{ toYaml .Values.aiproxy | nindent 2 }}
+kind: ConfigMap
+metadata:
+  namespace: {{.Release.Namespace}}
+  name: {{ include "aiproxy.fullname" . }}-env
+  labels:
+    {{- include "aiproxy.labels" . | nindent 4 }}

+ 65 - 0
core/deploy/charts/aiproxy/templates/deploy.yaml

@@ -0,0 +1,65 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  namespace: {{ .Release.Namespace }}
+  name: {{ include "aiproxy.fullname" . }}
+  labels:
+    {{- include "aiproxy.labels" . | nindent 4 }}
+spec:
+  replicas: {{.Values.replicas}}
+  strategy:
+    type: RollingUpdate
+    rollingUpdate:
+      maxUnavailable: 0
+      maxSurge: 1
+  selector:
+    matchLabels:
+      {{ include "aiproxy.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      annotations:
+        {{- with .Values.podAnnotations }}
+        {{ toYaml . | nindent 8 }}
+        {{- end }}
+        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+      labels:
+        {{ include "aiproxy.labels" . | nindent 8 }}
+        {{- with .Values.podLabels }}
+        {{ toYaml . | nindent 8 }}
+        {{- end }}
+    spec:
+      terminationGracePeriodSeconds: 600
+      containers:
+        - image: {{.Values.image}}
+          imagePullPolicy: IfNotPresent
+          name: aiproxy
+          resources: {{ toYaml .Values.resources | nindent 12 }}
+          envFrom:
+            - configMapRef:
+                name: {{ include "aiproxy.fullname" . }}-env
+          ports:
+            - name: http
+              containerPort: 3000
+              protocol: TCP
+          startupProbe:
+            httpGet:
+              port: http
+              path: /api/status
+            initialDelaySeconds: 5
+            periodSeconds: 3
+            failureThreshold: 30
+            successThreshold: 1
+            timeoutSeconds: 1
+---
+apiVersion: v1
+kind: Service
+metadata:
+  namespace: {{ .Release.Namespace }}
+  name: {{ include "aiproxy.fullname" . }}
+  labels:
+    {{- include "aiproxy.labels" . | nindent 4 }}
+spec:
+  ports:
+    - port: 3000
+      targetPort: 3000
+  selector: {{ include "aiproxy.selectorLabels" . | nindent 4 }}

+ 39 - 0
core/deploy/charts/aiproxy/templates/ingress.yaml

@@ -0,0 +1,39 @@
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/enable-cors: "true"
+    nginx.ingress.kubernetes.io/cors-allow-methods: "PUT, GET, POST, DELETE, PATCH, OPTIONS"
+    nginx.ingress.kubernetes.io/cors-allow-origin: "https://{{.Values.cloudDomain}}:{{.Values.cloudPort}}, https://*.{{.Values.cloudDomain}}:{{.Values.cloudPort}}"
+    nginx.ingress.kubernetes.io/cors-allow-credentials: "true"
+    nginx.ingress.kubernetes.io/cors-max-age: "600"
+    nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      more_clear_headers "X-Frame-Options:";
+      more_set_headers "Content-Security-Policy: default-src * blob: data: *.{{.Values.cloudDomain}}:{{.Values.cloudPort}} {{.Values.cloudDomain}}:{{.Values.cloudPort}}; img-src * data: blob: resource: *.{{.Values.cloudDomain}}:{{.Values.cloudPort}} {{.Values.cloudDomain}}:{{.Values.cloudPort}}; connect-src * wss: blob: resource:; style-src 'self' 'unsafe-inline' blob: *.{{.Values.cloudDomain}}:{{.Values.cloudPort}} {{.Values.cloudDomain}}:{{.Values.cloudPort}} resource:; script-src 'self' 'unsafe-inline' 'unsafe-eval' blob: *.{{.Values.cloudDomain}}:{{.Values.cloudPort}} {{.Values.cloudDomain}}:{{.Values.cloudPort}} resource: *.baidu.com *.bdstatic.com https://js.stripe.com; frame-src 'self' *.{{.Values.cloudDomain}}:{{.Values.cloudPort}} {{.Values.cloudDomain}}:{{.Values.cloudPort}} mailto: tel: weixin: mtt: *.baidu.com https://js.stripe.com; frame-ancestors 'self' https://{{.Values.cloudDomain}}:{{.Values.cloudPort}} https://*.{{.Values.cloudDomain}}:{{.Values.cloudPort}}";
+      more_set_headers "X-Xss-Protection: 1; mode=block";
+    higress.io/response-header-control-remove: X-Frame-Options
+    higress.io/response-header-control-update: |
+      Content-Security-Policy "default-src * blob: data: *.{{.Values.cloudDomain}}:{{.Values.cloudPort}} {{.Values.cloudDomain}}:{{.Values.cloudPort}}; img-src * data: blob: resource: *.{{.Values.cloudDomain}}:{{.Values.cloudPort}} {{.Values.cloudDomain}}:{{.Values.cloudPort}}; connect-src * wss: blob: resource:; style-src 'self' 'unsafe-inline' blob: *.{{.Values.cloudDomain}}:{{.Values.cloudPort}} {{.Values.cloudDomain}}:{{.Values.cloudPort}} resource:; script-src 'self' 'unsafe-inline' 'unsafe-eval' blob: *.{{.Values.cloudDomain}}:{{.Values.cloudPort}} {{.Values.cloudDomain}}:{{.Values.cloudPort}} resource: *.baidu.com *.bdstatic.com https://js.stripe.com; frame-src 'self' *.{{.Values.cloudDomain}}:{{.Values.cloudPort}} {{.Values.cloudDomain}}:{{.Values.cloudPort}} mailto: tel: weixin: mtt: *.baidu.com https://js.stripe.com; frame-ancestors 'self' https://{{.Values.cloudDomain}}:{{.Values.cloudPort}} https://*.{{.Values.cloudDomain}}:{{.Values.cloudPort}}"
+      X-Xss-Protection "1; mode=block"
+  namespace: {{.Release.Namespace}}
+  name: {{ include "aiproxy.fullname" . }}
+  labels:
+    {{ include "aiproxy.labels" . | nindent 4 }}
+spec:
+  rules:
+    - host: aiproxy.{{.Values.cloudDomain}}
+      http:
+        paths:
+          - pathType: Prefix
+            path: /
+            backend:
+              service:
+                name: aiproxy
+                port:
+                  number: 3000
+  tls:
+    - hosts:
+        - 'aiproxy.{{.Values.cloudDomain}}'
+      secretName: wildcard-cert

+ 31 - 0
core/deploy/charts/aiproxy/values.yaml

@@ -0,0 +1,31 @@
+fullnameOverride: aiproxy
+
+replicas: 3
+resources:
+  requests:
+    cpu: 50m
+    memory: 50Mi
+  limits:
+    cpu: 500m
+    memory: 512Mi
+
+image: ghcr.io/labring/aiproxy:latest
+
+cloudDomain: 127.0.0.1.nip.io
+cloudPort: 443
+
+sealos: true
+
+aiproxy:
+  DEBUG: "false"
+  DEBUG_SQL: "false"
+  ADMIN_KEY: ""
+  SEALOS_JWT_KEY: ""
+  SQL_DSN: ""
+  LOG_SQL_DSN: ""
+  REDIS: ""
+  BALANCE_SEALOS_CHECK_REAL_NAME_ENABLE: "false"
+  BALANCE_SEALOS_NO_REAL_NAME_USED_AMOUNT_LIMIT: "1"
+  SAVE_ALL_LOG_DETAIL: "false"
+  LOG_DETAIL_REQUEST_BODY_MAX_SIZE: "128"
+  LOG_DETAIL_RESPONSE_BODY_MAX_SIZE: "128"

+ 79 - 0
core/deploy/install.sh

@@ -0,0 +1,79 @@
+#!/bin/bash
+timestamp() {
+  date +"%Y-%m-%d %T"
+}
+print() {
+  flag=$(timestamp)
+  echo -e "\033[1;32m\033[1m INFO [$flag] >> $* \033[0m"
+}
+warn() {
+  flag=$(timestamp)
+  echo -e "\033[33m WARN [$flag] >> $* \033[0m"
+}
+info() {
+  flag=$(timestamp)
+  echo -e "\033[36m INFO [$flag] >> $* \033[0m"
+}
+wait_for_secret() {
+  local secret_name=$1
+  local namespace=${2:-hubble-service}
+
+  info "Checking if secret $secret_name exists..."
+
+  while ! kubectl get secret "$secret_name" -n "$namespace" > /dev/null 2>&1; do
+    warn "Secret $secret_name does not exist, retrying in 5 seconds..."
+    sleep 5
+  done
+
+  info "Secret $secret_name exists, proceeding with the next steps."
+}
+
+#===========================================================================
+HELM_OPTS=${HELM_OPTS:-""}
+
+NODE_COUNT=$(kubectl get nodes --no-headers | wc -l)
+REPLICA_OPTIONS=""
+if [ "$NODE_COUNT" -eq 1 ]; then
+  REPLICA_OPTIONS="--set pgsql.replicas=1 --set pgsqlLog.replicas=1 --set redis.replicas=1 --set redis.sentinelReplicas=0 --set replicas=1 "
+  HELM_OPTS="${HELM_OPTS} ${REPLICA_OPTIONS}"
+fi
+
+helm upgrade -i aiproxy-database -n aiproxy-system charts/aiproxy-database  ${HELM_OPTS} --wait
+
+wait_for_secret "aiproxy-conn-credential" "aiproxy-system"
+wait_for_secret "aiproxy-log-conn-credential" "aiproxy-system"
+wait_for_secret "aiproxy-redis-conn-credential" "aiproxy-system"
+
+AIPROXY_USER=$(kubectl get secret -n aiproxy-system aiproxy-conn-credential -ojsonpath="{.data.username}" | base64 -d)
+AIPROXY_PASSWORD=$(kubectl get secret -n aiproxy-system aiproxy-conn-credential -ojsonpath="{.data.password}" | base64 -d)
+AIPROXY_PORT=$(kubectl get secret -n aiproxy-system aiproxy-conn-credential -ojsonpath="{.data.port}" | base64 -d)
+AIPROXY_HOST=$(kubectl get secret -n aiproxy-system aiproxy-conn-credential -ojsonpath="{.data.host}" | base64 -d).aiproxy-system.svc
+AIPROXY_URI="postgres://${AIPROXY_USER}:${AIPROXY_PASSWORD}@${AIPROXY_HOST}:${AIPROXY_PORT}/postgres?sslmode=disable"
+
+LOG_USER=$(kubectl get secret -n aiproxy-system aiproxy-log-conn-credential -ojsonpath="{.data.username}" | base64 -d)
+LOG_PASSWORD=$(kubectl get secret -n aiproxy-system aiproxy-log-conn-credential -ojsonpath="{.data.password}" | base64 -d)
+LOG_PORT=$(kubectl get secret -n aiproxy-system aiproxy-log-conn-credential -ojsonpath="{.data.port}" | base64 -d)
+LOG_HOST=$(kubectl get secret -n aiproxy-system aiproxy-log-conn-credential -ojsonpath="{.data.host}" | base64 -d).aiproxy-system.svc
+LOG_URI="postgres://${LOG_USER}:${LOG_PASSWORD}@${LOG_HOST}:${LOG_PORT}/postgres?sslmode=disable"
+
+REDIS_USER=$(kubectl get secret -n aiproxy-system aiproxy-redis-conn-credential -ojsonpath="{.data.username}" | base64 -d)
+REDIS_PASSWORD=$(kubectl get secret -n aiproxy-system aiproxy-redis-conn-credential -ojsonpath="{.data.password}" | base64 -d)
+REDIS_PORT=$(kubectl get secret -n aiproxy-system aiproxy-redis-conn-credential -ojsonpath="{.data.port}" | base64 -d)
+REDIS_HOST=$(kubectl get secret -n aiproxy-system aiproxy-redis-conn-credential -ojsonpath="{.data.host}" | base64 -d).aiproxy-system.svc
+REDIS_URI="redis://${REDIS_USER}:${REDIS_PASSWORD}@${REDIS_HOST}:${REDIS_PORT}"
+
+varJwtInternal=$(kubectl get configmap sealos-config -n sealos-system -o jsonpath='{.data.jwtInternal}')
+adminKey=$(kubectl get configmap aiproxy-env -n aiproxy-system -o jsonpath='{.data.ADMIN_KEY}' )
+if [ -z "$adminKey" ]; then
+  print "adminKey is empty, generating new credentials."
+  adminKey=$(openssl rand -hex 64 | head -c 32)
+fi
+kubectl delete configmap aiproxy-env -n aiproxy-system --ignore-not-found
+kubectl delete ingress -n aiproxy-system aiproxy --ignore-not-found
+kubectl delete deployment -n aiproxy-system aiproxy --ignore-not-found
+kubectl delete service -n aiproxy-system aiproxy --ignore-not-found
+
+SEALOS_CLOUD_DOMAIN=$(kubectl get configmap sealos-config -n sealos-system -o jsonpath='{.data.cloudDomain}')
+SEALOS_CLOUD_PORT=$(kubectl get configmap sealos-config -n sealos-system -o jsonpath='{.data.cloudPort}')
+helm upgrade -i aiproxy -n aiproxy-system charts/aiproxy  ${HELM_OPTS} --set aiproxy.SQL_DSN=${AIPROXY_URI} --set aiproxy.LOG_SQL_DSN=${LOG_URI}  --set aiproxy.REDIS=${REDIS_URI} \
+  --set aiproxy.SEALOS_JWT_KEY=${varJwtInternal}  --set aiproxy.ADMIN_KEY=${adminKey} --set cloudDomain=${SEALOS_CLOUD_DOMAIN} --set cloudPort=${SEALOS_CLOUD_PORT}
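
The script reuses an existing ADMIN_KEY from the aiproxy-env ConfigMap and only generates a new 32-character hex key when none is present, so repeated installs keep the same credential. A hedged verification step after a run, assuming the default fullnameOverride of aiproxy (which yields the aiproxy-env ConfigMap name):

  # Inspect the admin key the chart applied (verification only, not part of install.sh).
  kubectl get configmap aiproxy-env -n aiproxy-system -o jsonpath='{.data.ADMIN_KEY}'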

+ 0 - 17
core/deploy/manifests/aiproxy-config.yaml.tmpl

@@ -1,17 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: aiproxy-env
-data:
-  DEBUG: "false"
-  DEBUG_SQL: "false"
-  ADMIN_KEY: "{{ .ADMIN_KEY }}"
-  SEALOS_JWT_KEY: "{{ .SEALOS_JWT_KEY }}"
-  SQL_DSN: "{{ .SQL_DSN }}"
-  LOG_SQL_DSN: "{{ .LOG_SQL_DSN }}"
-  REDIS: "{{ .REDIS }}"
-  BALANCE_SEALOS_CHECK_REAL_NAME_ENABLE: "{{ .BALANCE_SEALOS_CHECK_REAL_NAME_ENABLE }}"
-  BALANCE_SEALOS_NO_REAL_NAME_USED_AMOUNT_LIMIT: "{{ .BALANCE_SEALOS_NO_REAL_NAME_USED_AMOUNT_LIMIT }}"
-  SAVE_ALL_LOG_DETAIL: "{{ .SAVE_ALL_LOG_DETAIL }}"
-  LOG_DETAIL_REQUEST_BODY_MAX_SIZE: "{{ .LOG_DETAIL_REQUEST_BODY_MAX_SIZE }}"
-  LOG_DETAIL_RESPONSE_BODY_MAX_SIZE: "{{ .LOG_DETAIL_RESPONSE_BODY_MAX_SIZE }}"

+ 0 - 70
core/deploy/manifests/deploy.yaml.tmpl

@@ -1,70 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: aiproxy
-  namespace: aiproxy-system
-  labels:
-    cloud.sealos.io/app-deploy-manager: aiproxy
-spec:
-  ports:
-    - port: 3000
-      targetPort: 3000
-  selector:
-    app: aiproxy
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: aiproxy
-  namespace: aiproxy-system
-  annotations:
-    originImageName: ghcr.io/labring/aiproxy:latest
-    deploy.cloud.sealos.io/minReplicas: '3'
-    deploy.cloud.sealos.io/maxReplicas: '3'
-  labels:
-    cloud.sealos.io/app-deploy-manager: aiproxy
-    app: aiproxy
-spec:
-  replicas: 3
-  revisionHistoryLimit: 1
-  selector:
-    matchLabels:
-      app: aiproxy
-  strategy:
-    type: RollingUpdate
-    rollingUpdate:
-      maxUnavailable: 0
-      maxSurge: 1
-  template:
-    metadata:
-      labels:
-        app: aiproxy
-    spec:
-      terminationGracePeriodSeconds: 600
-      containers:
-        - name: aiproxy
-          image: ghcr.io/labring/aiproxy:latest
-          envFrom:
-          - configMapRef:
-              name: aiproxy-env
-          resources:
-            requests:
-              cpu: 50m
-              memory: 50Mi
-            limits:
-              cpu: 500m
-              memory: 512Mi
-          ports:
-            - containerPort: 3000
-          imagePullPolicy: Always
-          startupProbe:
-            httpGet:
-              port: 3000
-              path: /api/status
-            initialDelaySeconds: 5
-            periodSeconds: 3
-            failureThreshold: 30
-            successThreshold: 1
-            timeoutSeconds: 1
-      serviceAccountName: default
-      automountServiceAccountToken: false

+ 0 - 37
core/deploy/manifests/ingress.yaml.tmpl

@@ -1,37 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  annotations:
-    kubernetes.io/ingress.class: nginx
-    nginx.ingress.kubernetes.io/enable-cors: "true"
-    nginx.ingress.kubernetes.io/cors-allow-methods: "PUT, GET, POST, DELETE, PATCH, OPTIONS"
-    nginx.ingress.kubernetes.io/cors-allow-origin: "https://{{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }}, https://*.{{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }}"
-    nginx.ingress.kubernetes.io/cors-allow-credentials: "true"
-    nginx.ingress.kubernetes.io/cors-max-age: "600"
-    nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
-    nginx.ingress.kubernetes.io/configuration-snippet: |
-      more_clear_headers "X-Frame-Options:";
-      more_set_headers "Content-Security-Policy: default-src * blob: data: *.{{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} {{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }}; img-src * data: blob: resource: *.{{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} {{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }}; connect-src * wss: blob: resource:; style-src 'self' 'unsafe-inline' blob: *.{{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} {{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} resource:; script-src 'self' 'unsafe-inline' 'unsafe-eval' blob: *.{{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} {{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} resource: *.baidu.com *.bdstatic.com https://js.stripe.com; frame-src 'self' *.{{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} {{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} mailto: tel: weixin: mtt: *.baidu.com https://js.stripe.com; frame-ancestors 'self' https://{{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} https://*.{{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }}";
-      more_set_headers "X-Xss-Protection: 1; mode=block";
-    higress.io/response-header-control-remove: X-Frame-Options
-    higress.io/response-header-control-update: |
-      Content-Security-Policy "default-src * blob: data: *.{{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} {{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }}; img-src * data: blob: resource: *.{{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} {{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }}; connect-src * wss: blob: resource:; style-src 'self' 'unsafe-inline' blob: *.{{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} {{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} resource:; script-src 'self' 'unsafe-inline' 'unsafe-eval' blob: *.{{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} {{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} resource: *.baidu.com *.bdstatic.com https://js.stripe.com; frame-src 'self' *.{{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} {{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} mailto: tel: weixin: mtt: *.baidu.com https://js.stripe.com; frame-ancestors 'self' https://{{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }} https://*.{{ .cloudDomain }}{{ if .cloudPort }}:{{ .cloudPort }}{{ end }}"
-      X-Xss-Protection "1; mode=block"
-  name: aiproxy
-  namespace: aiproxy-system
-spec:
-  rules:
-    - host: aiproxy.{{ .cloudDomain }}
-      http:
-        paths:
-          - pathType: Prefix
-            path: /v1
-            backend:
-              service:
-                name: aiproxy
-                port:
-                  number: 3000
-  tls:
-    - hosts:
-        - 'aiproxy.{{ .cloudDomain }}'
-      secretName: {{ .certSecretName }}

+ 0 - 94
core/deploy/manifests/pgsql-log.yaml

@@ -1,94 +0,0 @@
-apiVersion: apps.kubeblocks.io/v1alpha1
-kind: Cluster
-metadata:
-  finalizers:
-    - cluster.kubeblocks.io/finalizer
-  labels:
-    clusterdefinition.kubeblocks.io/name: postgresql
-    clusterversion.kubeblocks.io/name: postgresql-14.8.0
-    sealos-db-provider-cr: aiproxy-log
-  annotations: {}
-  name: aiproxy-log
-  namespace: aiproxy-system
-spec:
-  affinity:
-    nodeLabels: {}
-    podAntiAffinity: Preferred
-    tenancy: SharedNode
-    topologyKeys:
-      - kubernetes.io/hostname
-  clusterDefinitionRef: postgresql
-  clusterVersionRef: postgresql-14.8.0
-  componentSpecs:
-    - componentDefRef: postgresql
-      monitor: true
-      name: postgresql
-      replicas: 2
-      resources:
-        limits:
-          cpu: 1000m
-          memory: 1024Mi
-        requests:
-          cpu: 100m
-          memory: 102Mi
-      serviceAccountName: aiproxy-log
-      switchPolicy:
-        type: Noop
-      volumeClaimTemplates:
-        - name: data
-          spec:
-            accessModes:
-              - ReadWriteOnce
-            resources:
-              requests:
-                storage: 10Gi
-  terminationPolicy: Delete
-  tolerations: []
-
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  labels:
-    sealos-db-provider-cr: aiproxy-log
-    app.kubernetes.io/instance: aiproxy-log
-    app.kubernetes.io/managed-by: kbcli
-  name: aiproxy-log
-  namespace: aiproxy-system
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  labels:
-    sealos-db-provider-cr: aiproxy-log
-    app.kubernetes.io/instance: aiproxy-log
-    app.kubernetes.io/managed-by: kbcli
-  name: aiproxy-log
-  namespace: aiproxy-system
-rules:
-  - apiGroups:
-      - '*'
-    resources:
-      - '*'
-    verbs:
-      - '*'
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  labels:
-    sealos-db-provider-cr: aiproxy-log
-    app.kubernetes.io/instance: aiproxy-log
-    app.kubernetes.io/managed-by: kbcli
-  name: aiproxy-log
-  namespace: aiproxy-system
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: aiproxy-log
-subjects:
-  - kind: ServiceAccount
-    name: aiproxy-log
-    namespace: aiproxy-system

+ 0 - 94
core/deploy/manifests/pgsql.yaml

@@ -1,94 +0,0 @@
-apiVersion: apps.kubeblocks.io/v1alpha1
-kind: Cluster
-metadata:
-  finalizers:
-    - cluster.kubeblocks.io/finalizer
-  labels:
-    clusterdefinition.kubeblocks.io/name: postgresql
-    clusterversion.kubeblocks.io/name: postgresql-14.8.0
-    sealos-db-provider-cr: aiproxy
-  annotations: {}
-  name: aiproxy
-  namespace: aiproxy-system
-spec:
-  affinity:
-    nodeLabels: {}
-    podAntiAffinity: Preferred
-    tenancy: SharedNode
-    topologyKeys:
-      - kubernetes.io/hostname
-  clusterDefinitionRef: postgresql
-  clusterVersionRef: postgresql-14.8.0
-  componentSpecs:
-    - componentDefRef: postgresql
-      monitor: true
-      name: postgresql
-      replicas: 2
-      resources:
-        limits:
-          cpu: 1000m
-          memory: 1024Mi
-        requests:
-          cpu: 100m
-          memory: 102Mi
-      serviceAccountName: aiproxy
-      switchPolicy:
-        type: Noop
-      volumeClaimTemplates:
-        - name: data
-          spec:
-            accessModes:
-              - ReadWriteOnce
-            resources:
-              requests:
-                storage: 3Gi
-  terminationPolicy: Delete
-  tolerations: []
-
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  labels:
-    sealos-db-provider-cr: aiproxy
-    app.kubernetes.io/instance: aiproxy
-    app.kubernetes.io/managed-by: kbcli
-  name: aiproxy
-  namespace: aiproxy-system
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  labels:
-    sealos-db-provider-cr: aiproxy
-    app.kubernetes.io/instance: aiproxy
-    app.kubernetes.io/managed-by: kbcli
-  name: aiproxy
-  namespace: aiproxy-system
-rules:
-  - apiGroups:
-      - "*"
-    resources:
-      - "*"
-    verbs:
-      - "*"
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  labels:
-    sealos-db-provider-cr: aiproxy
-    app.kubernetes.io/instance: aiproxy
-    app.kubernetes.io/managed-by: kbcli
-  name: aiproxy
-  namespace: aiproxy-system
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: aiproxy
-subjects:
-  - kind: ServiceAccount
-    name: aiproxy
-    namespace: aiproxy-system

+ 0 - 107
core/deploy/manifests/redis.yaml

@@ -1,107 +0,0 @@
-apiVersion: apps.kubeblocks.io/v1alpha1
-kind: Cluster
-metadata:
-  finalizers:
-    - cluster.kubeblocks.io/finalizer
-  labels:
-    clusterdefinition.kubeblocks.io/name: redis
-    clusterversion.kubeblocks.io/name: redis-7.0.6
-    sealos-db-provider-cr: aiproxy-redis
-  annotations: {}
-  name: aiproxy-redis
-  namespace: aiproxy-system
-spec:
-  affinity:
-    nodeLabels: {}
-    podAntiAffinity: Preferred
-    tenancy: SharedNode
-    topologyKeys:
-      - kubernetes.io/hostname
-  clusterDefinitionRef: redis
-  clusterVersionRef: redis-7.0.6
-  componentSpecs:
-    - componentDefRef: redis
-      monitor: true
-      name: redis
-      replicas: 3
-      resources:
-        limits:
-          cpu: 1000m
-          memory: 1024Mi
-        requests:
-          cpu: 100m
-          memory: 102Mi
-      serviceAccountName: aiproxy-redis
-      switchPolicy:
-        type: Noop
-      volumeClaimTemplates:
-        - name: data
-          spec:
-            accessModes:
-              - ReadWriteOnce
-            resources:
-              requests:
-                storage: 3Gi
-            storageClassName: openebs-backup
-    - componentDefRef: redis-sentinel
-      monitor: true
-      name: redis-sentinel
-      replicas: 3
-      resources:
-        limits:
-          cpu: 100m
-          memory: 100Mi
-        requests:
-          cpu: 100m
-          memory: 100Mi
-      serviceAccountName: aiproxy-redis
-  terminationPolicy: Delete
-  tolerations: []
-
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  labels:
-    sealos-db-provider-cr: aiproxy-redis
-    app.kubernetes.io/instance: aiproxy-redis
-    app.kubernetes.io/managed-by: kbcli
-  name: aiproxy-redis
-  namespace: aiproxy-system
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  labels:
-    sealos-db-provider-cr: aiproxy-redis
-    app.kubernetes.io/instance: aiproxy-redis
-    app.kubernetes.io/managed-by: kbcli
-  name: aiproxy-redis
-  namespace: aiproxy-system
-rules:
-  - apiGroups:
-      - '*'
-    resources:
-      - '*'
-    verbs:
-      - '*'
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  labels:
-    sealos-db-provider-cr: aiproxy-redis
-    app.kubernetes.io/instance: aiproxy-redis
-    app.kubernetes.io/managed-by: kbcli
-  name: aiproxy-redis
-  namespace: aiproxy-system
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: aiproxy-redis
-subjects:
-  - kind: ServiceAccount
-    name: aiproxy-redis
-    namespace: aiproxy-system

+ 0 - 111
core/deploy/scripts/init.sh

@@ -1,111 +0,0 @@
-#!/bin/bash
-set -e
-
-# Create namespace
-kubectl create ns aiproxy-system || true
-
-# Function to wait for secret
-wait_for_secret() {
-  local secret_name=$1
-  local retries=0
-  while ! kubectl get secret -n aiproxy-system ${secret_name} >/dev/null 2>&1; do
-    sleep 3
-    retries=$((retries + 1))
-    if [ $retries -ge 30 ]; then
-      echo "Timeout waiting for secret ${secret_name}"
-      exit 1
-    fi
-  done
-}
-
-# Function to get secret value
-get_secret_value() {
-  local secret_name=$1
-  local key=$2
-  base64_value=$(kubectl get secret -n aiproxy-system ${secret_name} -o jsonpath="{.data.${key}}") || return $?
-  echo "$base64_value" | base64 -d
-}
-
-# Function to build postgres connection string
-build_postgres_dsn() {
-  local secret_name=$1
-  username=$(get_secret_value ${secret_name} "username") || return $?
-  password=$(get_secret_value ${secret_name} "password") || return $?
-  host=$(get_secret_value ${secret_name} "host") || return $?
-  port=$(get_secret_value ${secret_name} "port") || return $?
-  echo "postgres://${username}:${password}@${host}:${port}/postgres?sslmode=disable"
-}
-
-build_redis_conn() {
-  local secret_name=$1
-  username=$(get_secret_value ${secret_name} "username") || return $?
-  password=$(get_secret_value ${secret_name} "password") || return $?
-  host=$(get_secret_value ${secret_name} "host") || return $?
-  port=$(get_secret_value ${secret_name} "port") || return $?
-  echo "redis://${username}:${password}@${host}:${port}"
-}
-
-# Handle JWT configuration
-if grep "<sealos-jwt-key-placeholder>" manifests/aiproxy-config.yaml >/dev/null 2>&1; then
-  JWT_SECRET=$(kubectl get cm -n account-system account-manager-env -o jsonpath="{.data.ACCOUNT_API_JWT_SECRET}") || exit $?
-  sed -i "s|<sealos-jwt-key-placeholder>|${JWT_SECRET}|g" manifests/aiproxy-config.yaml
-fi
-
-# Handle PostgreSQL configuration
-if grep "<sql-placeholder>" manifests/aiproxy-config.yaml >/dev/null 2>&1; then
-  if grep "<sql-log-placeholder>" manifests/aiproxy-config.yaml >/dev/null 2>&1; then
-    # Deploy PostgreSQL resources
-    kubectl apply -f manifests/pgsql.yaml -n aiproxy-system
-    kubectl apply -f manifests/pgsql-log.yaml -n aiproxy-system
-
-    # Wait for secrets
-    wait_for_secret "aiproxy-conn-credential"
-    wait_for_secret "aiproxy-log-conn-credential"
-
-    # Build connection strings
-    SQL_DSN=$(build_postgres_dsn "aiproxy-conn-credential") || exit $?
-    LOG_SQL_DSN=$(build_postgres_dsn "aiproxy-log-conn-credential") || exit $?
-
-    # Update config
-    sed -i "s|<sql-placeholder>|${SQL_DSN}|g" manifests/aiproxy-config.yaml
-    sed -i "s|<sql-log-placeholder>|${LOG_SQL_DSN}|g" manifests/aiproxy-config.yaml
-  elif grep "LOG_SQL_DSN: \"\"" manifests/aiproxy-config.yaml >/dev/null 2>&1; then
-    # Deploy PostgreSQL resources
-    kubectl apply -f manifests/pgsql.yaml -n aiproxy-system
-
-    # Wait for secrets
-    wait_for_secret "aiproxy-conn-credential"
-
-    # Build connection strings
-    SQL_DSN=$(build_postgres_dsn "aiproxy-conn-credential") || exit $?
-
-    # Update config
-    sed -i "s|<sql-placeholder>|${SQL_DSN}|g" manifests/aiproxy-config.yaml
-  else
-    echo "Error: LOG_SQL_DSN is not allowed to be passed alone, please provide both SQL_DSN and LOG_SQL_DSN or provide SQL_DSN only or neither."
-    exit 1
-  fi
-elif grep "<sql-log-placeholder>" manifests/aiproxy-config.yaml >/dev/null 2>&1; then
-  sed -i 's/<sql-log-placeholder>//g' manifests/aiproxy-config.yaml
-fi
-
-# Handle Redis configuration
-if grep "<redis-placeholder>" manifests/aiproxy-config.yaml >/dev/null 2>&1; then
-  kubectl apply -f manifests/redis.yaml -n aiproxy-system
-
-  wait_for_secret "aiproxy-redis-conn-credential"
-
-  # Build redis connection string
-  REDIS_CONN=$(build_redis_conn "aiproxy-redis-conn-credential") || exit $?
-
-  sed -i "s|<redis-placeholder>|${REDIS_CONN}|g" manifests/aiproxy-config.yaml
-fi
-
-# Deploy application
-kubectl apply -f manifests/aiproxy-config.yaml -n aiproxy-system
-kubectl apply -f manifests/deploy.yaml -n aiproxy-system
-
-# Create ingress if domain is specified
-if [[ -n "$cloudDomain" ]]; then
-  kubectl create -f manifests/ingress.yaml -n aiproxy-system || true
-fi