diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 68c3ea150..0949eb358 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -6,10 +6,9 @@ updates: interval: "cron" cronjob: "0 0 * * *" groups: - action-minor: - update-types: - - minor - - patch + actions-major: + patterns: + - "*" - package-ecosystem: "gomod" directories: - "/" @@ -28,10 +27,9 @@ updates: interval: "cron" cronjob: "0 0 * * *" groups: - npm-minor: - update-types: - - minor - - patch + npm-major: + patterns: + - "*" - package-ecosystem: "docker" directory: "pkg/config/templates" schedule: diff --git a/.github/workflows/api-sync.yml b/.github/workflows/api-sync.yml index 0295e028f..2cde09f1e 100644 --- a/.github/workflows/api-sync.yml +++ b/.github/workflows/api-sync.yml @@ -2,7 +2,8 @@ name: API Sync on: repository_dispatch: - types: [api-sync] + types: + - api-sync workflow_dispatch: # allow manual triggering # Add explicit permissions @@ -57,12 +58,15 @@ jobs: Changes were detected in the generated API code after syncing with the latest spec from infrastructure. branch: sync/api-types base: develop - labels: | - automated pr - api-sync + + - name: Approve a PR + if: steps.check.outputs.has_changes == 'true' + run: gh pr review --approve "${{ steps.cpr.outputs.pull-request-number }}" + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Enable Pull Request Automerge if: steps.check.outputs.has_changes == 'true' run: gh pr merge --auto --squash "${{ steps.cpr.outputs.pull-request-number }}" env: - GITHUB_TOKEN: ${{ steps.app-token.outputs.token }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml index 89933c333..228fbd29c 100644 --- a/.github/workflows/automerge.yml +++ b/.github/workflows/automerge.yml @@ -22,24 +22,17 @@ jobs: with: github-token: "${{ secrets.GITHUB_TOKEN }}" - - name: Generate token - id: app-token - uses: actions/create-github-app-token@v2 - with: - app-id: ${{ secrets.APP_ID }} - private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} - # Here the PR gets approved. 
- name: Approve a PR - if: ${{ steps.meta.outputs.update-type == 'version-update:semver-patch' || (!startsWith(steps.meta.outputs.previous-version, '0.') && steps.meta.outputs.update-type == 'version-update:semver-minor') }} + if: ${{ steps.meta.outputs.update-type == null || steps.meta.outputs.update-type == 'version-update:semver-patch' || (!startsWith(steps.meta.outputs.previous-version, '0.') && steps.meta.outputs.update-type == 'version-update:semver-minor') }} run: gh pr review --approve "${{ github.event.pull_request.html_url }}" env: - GITHUB_TOKEN: ${{ steps.app-token.outputs.token }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Finally, this sets the PR to allow auto-merging for patch and minor # updates if all checks pass - name: Enable auto-merge for Dependabot PRs - if: ${{ steps.meta.outputs.update-type == 'version-update:semver-patch' || (!startsWith(steps.meta.outputs.previous-version, '0.') && steps.meta.outputs.update-type == 'version-update:semver-minor') }} + if: ${{ steps.meta.outputs.update-type == null || steps.meta.outputs.update-type == 'version-update:semver-patch' || (!startsWith(steps.meta.outputs.previous-version, '0.') && steps.meta.outputs.update-type == 'version-update:semver-minor') }} run: gh pr merge --auto --squash "${{ github.event.pull_request.html_url }}" env: - GITHUB_TOKEN: ${{ steps.app-token.outputs.token }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/mirror-image.yml b/.github/workflows/mirror-image.yml index 73499fe52..abf168796 100644 --- a/.github/workflows/mirror-image.yml +++ b/.github/workflows/mirror-image.yml @@ -1,6 +1,9 @@ name: Mirror Image on: + repository_dispatch: + types: + - mirror-image workflow_call: inputs: image: @@ -15,18 +18,16 @@ on: permissions: contents: read + packages: write + id-token: write jobs: mirror: runs-on: ubuntu-latest - permissions: - contents: read - packages: write - id-token: write steps: - id: strip run: | - TAG=${{ inputs.image }} + TAG=${{ github.event.client_payload.image || inputs.image }} echo "image=${TAG##*/}" >> $GITHUB_OUTPUT - name: configure aws credentials uses: aws-actions/configure-aws-credentials@v5.1.1 @@ -43,7 +44,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - uses: akhilerm/tag-push-action@v2.2.0 with: - src: docker.io/${{ inputs.image }} + src: docker.io/${{ github.event.client_payload.image || inputs.image }} dst: | public.ecr.aws/supabase/${{ steps.strip.outputs.image }} ghcr.io/supabase/${{ steps.strip.outputs.image }} diff --git a/.github/workflows/mirror.yml b/.github/workflows/mirror.yml index 27afb1292..cbe8f17b7 100644 --- a/.github/workflows/mirror.yml +++ b/.github/workflows/mirror.yml @@ -14,9 +14,6 @@ on: # mirrored yet. It's a catch-22! # # TODO: Make the cli start test run *after* we mirror images (if needed). 
- pull_request_review: - types: - - submitted workflow_dispatch: permissions: @@ -25,7 +22,6 @@ permissions: jobs: setup: runs-on: ubuntu-latest - if: ${{ github.event_name == 'workflow_dispatch' || github.event.review.state == 'approved' }} outputs: tags: ${{ steps.list.outputs.tags }} curr: ${{ steps.curr.outputs.tags }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 80c496f64..79e7ecb69 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -60,18 +60,6 @@ jobs: env: GITHUB_TOKEN: ${{ steps.app-token.outputs.token }} - publish: - name: Publish NPM - needs: - - fast-forward - permissions: - contents: read - id-token: write - uses: ./.github/workflows/tag-npm.yml - with: - release: ${{ needs.fast-forward.outputs.release_tag }} - secrets: inherit - compose: name: Bump self-hosted versions needs: @@ -100,7 +88,6 @@ jobs: needs: - fast-forward - commit - - publish runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 diff --git a/README.md b/README.md index 8fb5d7a31..71b140c74 100644 --- a/README.md +++ b/README.md @@ -23,12 +23,6 @@ Available via [NPM](https://www.npmjs.com) as dev dependency. To install: npm i supabase --save-dev ``` -To install the beta release channel: - -```bash -npm i supabase@beta --save-dev -``` - When installing with yarn 4, you need to disable experimental fetch with the following nodejs config. ``` diff --git a/cmd/link.go b/cmd/link.go index f2422ff48..82b5f4ca3 100644 --- a/cmd/link.go +++ b/cmd/link.go @@ -1,6 +1,7 @@ package cmd import ( + "fmt" "os" "os/signal" @@ -8,6 +9,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/supabase/cli/internal/link" + "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/flags" "golang.org/x/term" ) @@ -39,6 +41,9 @@ var ( cobra.CheckErr(viper.BindPFlag("DB_PASSWORD", cmd.Flags().Lookup("password"))) return link.Run(ctx, flags.ProjectRef, skipPooler, fsys) }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Fprintln(os.Stdout, "Finished "+utils.Aqua("supabase link")+".") + }, } ) diff --git a/docs/supabase/db/pull.md b/docs/supabase/db/pull.md index 8f20f0b8f..1f9f4ae66 100644 --- a/docs/supabase/db/pull.md +++ b/docs/supabase/db/pull.md @@ -4,6 +4,8 @@ Pulls schema changes from a remote database. A new migration file will be create Requires your local project to be linked to a remote database by running `supabase link`. For self-hosted databases, you can pass in the connection parameters using `--db-url` flag. +> Note this command requires Docker Desktop (or a running Docker daemon), as it starts a local Postgres container to diff your remote schema. + Optionally, a new row can be inserted into the migration history table to reflect the current state of the remote database. If no entries exist in the migration history table, `pg_dump` will be used to capture all contents of the remote schemas you have created. Otherwise, this command will only diff schema changes against the remote database, similar to running `db diff --linked`. 
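
The `cmd/link.go` hunk above moves the `"Finished supabase link."` message out of `internal/link.Run` and into a cobra `PostRun` hook. Below is a minimal, self-contained sketch of why that ordering works, assuming only documented cobra behavior (cobra invokes `PostRun` only after `RunE` returns nil); the command name and printed strings are illustrative, not the CLI's actual wiring:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "link",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Stands in for link.Run; returning an error here
			// makes cobra skip PostRun entirely.
			fmt.Println("linking...")
			return nil
		},
		PostRun: func(cmd *cobra.Command, args []string) {
			// Runs only after RunE returns nil, so the success
			// message never prints for a failed link.
			fmt.Println("Finished link.")
		},
	}
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```

This keeps user-facing output in the command layer while `internal/link.Run` stays a plain library function that simply returns errors, which is also why the diff can end `Run` with `return utils.WriteFile(...)`.
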
diff --git a/go.mod b/go.mod index 5ce13247d..da5c8bd80 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/docker/go-connections v0.6.0 github.com/docker/go-units v0.5.0 github.com/fsnotify/fsnotify v1.9.0 - github.com/getsentry/sentry-go v0.39.0 + github.com/getsentry/sentry-go v0.40.0 github.com/go-errors/errors v1.5.1 github.com/go-git/go-git/v5 v5.16.4 github.com/go-playground/validator/v10 v10.28.0 @@ -39,6 +39,7 @@ require ( github.com/mithrandie/csvq-driver v1.7.0 github.com/muesli/reflow v0.3.0 github.com/oapi-codegen/nullable v1.1.0 + github.com/olekukonko/tablewriter v1.1.2 github.com/slack-go/slack v0.17.3 github.com/spf13/afero v1.15.0 github.com/spf13/cobra v1.10.1 @@ -126,6 +127,9 @@ require ( github.com/charmbracelet/x/term v0.2.1 // indirect github.com/chavacava/garif v0.1.0 // indirect github.com/ckaznocha/intrange v0.3.1 // indirect + github.com/clipperhouse/displaywidth v0.6.0 // indirect + github.com/clipperhouse/stringish v0.1.1 // indirect + github.com/clipperhouse/uax29/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.6.1 // indirect github.com/containerd/console v1.0.5 // indirect github.com/containerd/containerd/api v1.9.0 // indirect @@ -271,7 +275,7 @@ require ( github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-localereader v0.0.1 // indirect - github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mattn/go-runewidth v0.0.19 // indirect github.com/mattn/go-shellwords v1.0.12 // indirect github.com/mgechev/revive v1.9.0 // indirect github.com/microcosm-cc/bluemonday v1.0.27 // indirect @@ -315,7 +319,9 @@ require ( github.com/oapi-codegen/runtime v1.1.2 // indirect github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect - github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 // indirect + github.com/olekukonko/errors v1.1.0 // indirect + github.com/olekukonko/ll v0.1.3 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect github.com/pelletier/go-toml v1.9.5 // indirect diff --git a/go.sum b/go.sum index c18199eef..e22db1772 100644 --- a/go.sum +++ b/go.sum @@ -190,6 +190,12 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/ckaznocha/intrange v0.3.1 h1:j1onQyXvHUsPWujDH6WIjhyH26gkRt/txNlV7LspvJs= github.com/ckaznocha/intrange v0.3.1/go.mod h1:QVepyz1AkUoFQkpEqksSYpNpUo3c5W7nWh/s6SHIJJk= +github.com/clipperhouse/displaywidth v0.6.0 h1:k32vueaksef9WIKCNcoqRNyKbyvkvkysNYnAWz2fN4s= +github.com/clipperhouse/displaywidth v0.6.0/go.mod h1:R+kHuzaYWFkTm7xoMmK1lFydbci4X2CicfbGstSGg0o= +github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs= +github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA= +github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4= +github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ= github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= github.com/cloudflare/circl 
v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= @@ -341,8 +347,8 @@ github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIp github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= github.com/getkin/kin-openapi v0.131.0 h1:NO2UeHnFKRYhZ8wg6Nyh5Cq7dHk4suQQr72a4pMrDxE= github.com/getkin/kin-openapi v0.131.0/go.mod h1:3OlG51PCYNsPByuiMB0t4fjnNlIDnaEDsjiKUV8nL58= -github.com/getsentry/sentry-go v0.39.0 h1:uhnexj8PNCyCve37GSqxXOeXHh4cJNLNNB4w70Jtgo0= -github.com/getsentry/sentry-go v0.39.0/go.mod h1:eRXCoh3uvmjQLY6qu63BjUZnaBu5L5WhMV1RwYO8W5s= +github.com/getsentry/sentry-go v0.40.0 h1:VTJMN9zbTvqDqPwheRVLcp0qcUcM+8eFivvGocAaSbo= +github.com/getsentry/sentry-go v0.40.0/go.mod h1:eRXCoh3uvmjQLY6qu63BjUZnaBu5L5WhMV1RwYO8W5s= github.com/ghostiam/protogetter v0.3.15 h1:1KF5sXel0HE48zh1/vn0Loiw25A9ApyseLzQuif1mLY= github.com/ghostiam/protogetter v0.3.15/go.mod h1:WZ0nw9pfzsgxuRsPOFQomgDVSWtDLJRfQJEhsGbmQMA= github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= @@ -719,10 +725,9 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= -github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= -github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw= +github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= @@ -829,8 +834,14 @@ github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//J github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw= github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c= github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 h1:zrbMGy9YXpIeTnGj4EljqMiZsIcE09mmF8XsD5AYOJc= +github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6/go.mod h1:rEKTHC9roVVicUIfZK7DYrdIoM0EOr8mK1Hj5s3JjH0= +github.com/olekukonko/errors v1.1.0 h1:RNuGIh15QdDenh+hNvKrJkmxxjV4hcS50Db478Ou5sM= +github.com/olekukonko/errors v1.1.0/go.mod h1:ppzxA5jBKcO1vIpCXQ9ZqgDh8iwODz6OXIGKU8r5m4Y= +github.com/olekukonko/ll v0.1.3 h1:sV2jrhQGq5B3W0nENUISCR6azIPf7UBUpVq0x/y70Fg= +github.com/olekukonko/ll v0.1.3/go.mod h1:b52bVQRRPObe+yyBl0TxNfhesL0nedD4Cht0/zx55Ew= +github.com/olekukonko/tablewriter v1.1.2 h1:L2kI1Y5tZBct/O/TyZK1zIE9GlBj/TVs+AY5tZDCDSc= 
+github.com/olekukonko/tablewriter v1.1.2/go.mod h1:z7SYPugVqGVavWoA2sGsFIoOVNmEHxUAAMrhXONtfkg= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= diff --git a/internal/link/link.go b/internal/link/link.go index 9c7d0ce97..6832876ac 100644 --- a/internal/link/link.go +++ b/internal/link/link.go @@ -13,7 +13,6 @@ import ( "github.com/jackc/pgx/v4" "github.com/spf13/afero" "github.com/supabase/cli/internal/utils" - "github.com/supabase/cli/internal/utils/flags" "github.com/supabase/cli/internal/utils/tenant" "github.com/supabase/cli/pkg/api" "github.com/supabase/cli/pkg/cast" @@ -22,40 +21,18 @@ import ( ) func Run(ctx context.Context, projectRef string, skipPooler bool, fsys afero.Fs, options ...func(*pgx.ConnConfig)) error { - majorVersion := utils.Config.Db.MajorVersion + // 1. Link postgres version if err := checkRemoteProjectStatus(ctx, projectRef, fsys); err != nil { return err } - - // 1. Check service config + // 2. Check service config keys, err := tenant.GetApiKeys(ctx, projectRef) if err != nil { return err } LinkServices(ctx, projectRef, keys.ServiceRole, skipPooler, fsys) - - // 2. Check database connection - if config, err := flags.NewDbConfigWithPassword(ctx, projectRef); err != nil { - fmt.Fprintln(os.Stderr, utils.Yellow("WARN:"), err) - } else if err := linkDatabase(ctx, config, fsys, options...); err != nil { - return err - } - // 3. Save project ref - if err := utils.WriteFile(utils.ProjectRefPath, []byte(projectRef), fsys); err != nil { - return err - } - fmt.Fprintln(os.Stdout, "Finished "+utils.Aqua("supabase link")+".") - - // 4. 
Suggest config update - if utils.Config.Db.MajorVersion != majorVersion { - fmt.Fprintln(os.Stderr, utils.Yellow("WARNING:"), "Local database version differs from the linked project.") - fmt.Fprintf(os.Stderr, `Update your %s to fix it: -[db] -major_version = %d -`, utils.Bold(utils.ConfigPath), utils.Config.Db.MajorVersion) - } - return nil + return utils.WriteFile(utils.ProjectRefPath, []byte(projectRef), fsys) } func LinkServices(ctx context.Context, projectRef, serviceKey string, skipPooler bool, fsys afero.Fs) { @@ -66,7 +43,7 @@ func LinkServices(ctx context.Context, projectRef, serviceKey string, skipPooler func() error { return linkNetworkRestrictions(ctx, projectRef) }, func() error { return linkPostgrest(ctx, projectRef) }, func() error { return linkGotrue(ctx, projectRef) }, - func() error { return linkStorage(ctx, projectRef) }, + func() error { return linkStorage(ctx, projectRef, fsys) }, func() error { if skipPooler { utils.Config.Db.Pooler.ConnectionString = "" @@ -128,7 +105,7 @@ func linkGotrueVersion(ctx context.Context, api tenant.TenantAPI, fsys afero.Fs) return utils.WriteFile(utils.GotrueVersionPath, []byte(version), fsys) } -func linkStorage(ctx context.Context, projectRef string) error { +func linkStorage(ctx context.Context, projectRef string, fsys afero.Fs) error { resp, err := utils.GetSupabase().V1GetStorageConfigWithResponse(ctx, projectRef) if err != nil { return errors.Errorf("failed to read Storage config: %w", err) @@ -136,7 +113,7 @@ func linkStorage(ctx context.Context, projectRef string) error { return errors.Errorf("unexpected Storage config status %d: %s", resp.StatusCode(), string(resp.Body)) } utils.Config.Storage.FromRemoteStorageConfig(*resp.JSON200) - return nil + return utils.WriteFile(utils.StorageMigrationPath, []byte(utils.Config.Storage.TargetMigration), fsys) } func linkStorageVersion(ctx context.Context, api tenant.TenantAPI, fsys afero.Fs) error { @@ -253,8 +230,24 @@ func checkRemoteProjectStatus(ctx context.Context, projectRef string, fsys afero } // Update postgres image version to match the remote project - if version := resp.JSON200.Database.Version; len(version) > 0 { - return utils.WriteFile(utils.PostgresVersionPath, []byte(version), fsys) + return linkPostgresVersion(resp.JSON200.Database.Version, fsys) +} + +func linkPostgresVersion(version string, fsys afero.Fs) error { + if len(version) == 0 { + return nil } - return nil + majorVersion, err := strconv.ParseUint(strings.Split(version, ".")[0], 10, 7) + if err != nil { + return errors.Errorf("invalid major version: %w", err) + } + if uint64(utils.Config.Db.MajorVersion) != majorVersion { + fmt.Fprintln(os.Stderr, utils.Yellow("WARNING:"), "Local database version differs from the linked project.") + fmt.Fprintf(os.Stderr, `Update your %s to fix it: +[db] +major_version = %d +`, utils.Bold(utils.ConfigPath), majorVersion) + } + utils.Config.Db.MajorVersion = uint(majorVersion) + return utils.WriteFile(utils.PostgresVersionPath, []byte(version), fsys) } diff --git a/internal/seed/buckets/buckets.go b/internal/seed/buckets/buckets.go index 365c3a395..b922e63d8 100644 --- a/internal/seed/buckets/buckets.go +++ b/internal/seed/buckets/buckets.go @@ -3,6 +3,7 @@ package buckets import ( "context" "fmt" + "os" "github.com/spf13/afero" "github.com/supabase/cli/internal/storage/client" @@ -29,5 +30,25 @@ func Run(ctx context.Context, projectRef string, interactive bool, fsys afero.Fs if err := api.UpsertBuckets(ctx, utils.Config.Storage.Buckets, filter); err != nil { return err } + prune 
:= func(name string) bool { + label := fmt.Sprintf("Bucket %s not found in %s. Do you want to prune it?", utils.Bold(name), utils.Bold(utils.ConfigPath)) + shouldPrune, err := console.PromptYesNo(ctx, label, false) + if err != nil { + fmt.Fprintln(utils.GetDebugLogger(), err) + } + return shouldPrune + } + if utils.Config.Storage.AnalyticsBuckets.Enabled && len(projectRef) > 0 { + fmt.Fprintln(os.Stderr, "Updating analytics buckets...") + if err := api.UpsertAnalyticsBuckets(ctx, utils.Config.Storage.AnalyticsBuckets.Buckets, prune); err != nil { + return err + } + } + if utils.Config.Storage.VectorBuckets.Enabled && len(projectRef) > 0 { + fmt.Fprintln(os.Stderr, "Updating vector buckets...") + if err := api.UpsertVectorBuckets(ctx, utils.Config.Storage.VectorBuckets.Buckets, prune); err != nil { + return err + } + } return api.UpsertObjects(ctx, utils.Config.Storage.Buckets, utils.NewRootFS(fsys)) } diff --git a/internal/services/services.go b/internal/services/services.go index 1feeff3c4..46d399c43 100644 --- a/internal/services/services.go +++ b/internal/services/services.go @@ -5,14 +5,13 @@ import ( "fmt" "os" "strings" - "sync" "github.com/go-errors/errors" "github.com/spf13/afero" - "github.com/spf13/viper" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/flags" "github.com/supabase/cli/internal/utils/tenant" + "github.com/supabase/cli/pkg/queue" ) func Run(ctx context.Context, fsys afero.Fs) error { @@ -77,39 +76,53 @@ func CheckVersions(ctx context.Context, fsys afero.Fs) []imageVersion { } func listRemoteImages(ctx context.Context, projectRef string) map[string]string { - linked := make(map[string]string, 4) - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - if version, err := tenant.GetDatabaseVersion(ctx, projectRef); err == nil { - linked[utils.Config.Db.Image] = version - } - }() + linked := map[string]string{} keys, err := tenant.GetApiKeys(ctx, projectRef) if err != nil { - wg.Wait() return linked } + jq := queue.NewJobQueue(5) api := tenant.NewTenantAPI(ctx, projectRef, keys.ServiceRole) - wg.Add(2) - go func() { - defer wg.Done() - if version, err := api.GetGotrueVersion(ctx); err == nil { - linked[utils.Config.Auth.Image] = version - } else if viper.GetBool("DEBUG") { - fmt.Fprintln(os.Stderr, err) - } - }() - go func() { - defer wg.Done() - if version, err := api.GetPostgrestVersion(ctx); err == nil { - linked[utils.Config.Api.Image] = version - } else if viper.GetBool("DEBUG") { - fmt.Fprintln(os.Stderr, err) + jobs := []func() error{ + func() error { + version, err := tenant.GetDatabaseVersion(ctx, projectRef) + if err == nil { + linked[utils.Config.Db.Image] = version + } + return nil + }, + func() error { + version, err := api.GetGotrueVersion(ctx) + if err == nil { + linked[utils.Config.Auth.Image] = version + } + return nil + }, + func() error { + version, err := api.GetPostgrestVersion(ctx) + if err == nil { + linked[utils.Config.Api.Image] = version + } + return nil + }, + func() error { + version, err := api.GetStorageVersion(ctx) + if err == nil { + linked[utils.Config.Storage.Image] = version + } + return err + }, + } + // Ignore non-fatal errors linking services + logger := utils.GetDebugLogger() + for _, job := range jobs { + if err := jq.Put(job); err != nil { + fmt.Fprintln(logger, err) } - }() - wg.Wait() + } + if err := jq.Collect(); err != nil { + fmt.Fprintln(logger, err) + } return linked } diff --git a/internal/start/start.go b/internal/start/start.go index 76542429f..758ac30d1 100644 --- 
a/internal/start/start.go +++ b/internal/start/start.go @@ -244,9 +244,10 @@ func run(ctx context.Context, fsys afero.Fs, excludedContainers []string, dbConf } var started []string - var isStorageEnabled = utils.Config.Storage.Enabled && !isContainerExcluded(utils.Config.Storage.Image, excluded) - var isImgProxyEnabled = utils.Config.Storage.ImageTransformation != nil && + isStorageEnabled := utils.Config.Storage.Enabled && !isContainerExcluded(utils.Config.Storage.Image, excluded) + isImgProxyEnabled := utils.Config.Storage.ImageTransformation != nil && utils.Config.Storage.ImageTransformation.Enabled && !isContainerExcluded(utils.Config.Storage.ImgProxyImage, excluded) + isS3ProtocolEnabled := utils.Config.Storage.S3Protocol != nil && utils.Config.Storage.S3Protocol.Enabled fmt.Fprintln(os.Stderr, "Starting containers...") // Start Logflare @@ -793,7 +794,10 @@ EOF env = append(env, fmt.Sprintf("GOTRUE_EXTERNAL_%s_URL=%s", strings.ToUpper(name), config.Url)) } } - env = append(env, fmt.Sprintf("GOTRUE_EXTERNAL_WEB3_SOLANA_ENABLED=%v", utils.Config.Auth.Web3.Solana.Enabled)) + env = append(env, + fmt.Sprintf("GOTRUE_EXTERNAL_WEB3_SOLANA_ENABLED=%v", utils.Config.Auth.Web3.Solana.Enabled), + fmt.Sprintf("GOTRUE_EXTERNAL_WEB3_ETHEREUM_ENABLED=%v", utils.Config.Auth.Web3.Ethereum.Enabled), + ) // OAuth server configuration if utils.Config.Auth.OAuthServer.Enabled { @@ -994,6 +998,7 @@ EOF fmt.Sprintf("ENABLE_IMAGE_TRANSFORMATION=%t", isImgProxyEnabled), fmt.Sprintf("IMGPROXY_URL=http://%s:5001", utils.ImgProxyId), "TUS_URL_PATH=/storage/v1/upload/resumable", + fmt.Sprintf("S3_PROTOCOL_ENABLED=%t", isS3ProtocolEnabled), "S3_PROTOCOL_ACCESS_KEY_ID=" + utils.Config.Storage.S3Credentials.AccessKeyId, "S3_PROTOCOL_ACCESS_KEY_SECRET=" + utils.Config.Storage.S3Credentials.SecretAccessKey, "S3_PROTOCOL_PREFIX=/storage/v1", diff --git a/internal/status/status.go b/internal/status/status.go index ee087062b..62532d7ea 100644 --- a/internal/status/status.go +++ b/internal/status/status.go @@ -10,15 +10,16 @@ import ( "net/http" "net/url" "os" - "reflect" "slices" - "strings" "sync" "time" + "github.com/Netflix/go-env" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/go-errors/errors" + "github.com/olekukonko/tablewriter" + "github.com/olekukonko/tablewriter/tw" "github.com/spf13/afero" "github.com/supabase/cli/internal/utils" "github.com/supabase/cli/internal/utils/flags" @@ -27,9 +28,11 @@ import ( type CustomName struct { ApiURL string `env:"api.url,default=API_URL"` + RestURL string `env:"api.rest_url,default=REST_URL"` GraphqlURL string `env:"api.graphql_url,default=GRAPHQL_URL"` StorageS3URL string `env:"api.storage_s3_url,default=STORAGE_S3_URL"` McpURL string `env:"api.mcp_url,default=MCP_URL"` + FunctionsURL string `env:"api.functions_url,default=FUNCTIONS_URL"` DbURL string `env:"db.url,default=DB_URL"` StudioURL string `env:"studio.url,default=STUDIO_URL"` InbucketURL string `env:"inbucket.url,default=INBUCKET_URL,deprecated"` @@ -54,10 +57,15 @@ func (c *CustomName) toValues(exclude ...string) map[string]string { authEnabled := utils.Config.Auth.Enabled && !slices.Contains(exclude, utils.GotrueId) && !slices.Contains(exclude, utils.ShortContainerImageName(utils.Config.Auth.Image)) inbucketEnabled := utils.Config.Inbucket.Enabled && !slices.Contains(exclude, utils.InbucketId) && !slices.Contains(exclude, utils.ShortContainerImageName(utils.Config.Inbucket.Image)) storageEnabled := utils.Config.Storage.Enabled && !slices.Contains(exclude, 
utils.StorageId) && !slices.Contains(exclude, utils.ShortContainerImageName(utils.Config.Storage.Image)) + functionsEnabled := utils.Config.EdgeRuntime.Enabled && !slices.Contains(exclude, utils.EdgeRuntimeId) && !slices.Contains(exclude, utils.ShortContainerImageName(utils.Config.EdgeRuntime.Image)) if apiEnabled { values[c.ApiURL] = utils.Config.Api.ExternalUrl + values[c.RestURL] = utils.GetApiUrl("/rest/v1") values[c.GraphqlURL] = utils.GetApiUrl("/graphql/v1") + if functionsEnabled { + values[c.FunctionsURL] = utils.GetApiUrl("/functions/v1") + } if studioEnabled { values[c.McpURL] = utils.GetApiUrl("/mcp") } @@ -76,7 +84,7 @@ func (c *CustomName) toValues(exclude ...string) map[string]string { values[c.MailpitURL] = fmt.Sprintf("http://%s:%d", utils.Config.Hostname, utils.Config.Inbucket.Port) values[c.InbucketURL] = fmt.Sprintf("http://%s:%d", utils.Config.Hostname, utils.Config.Inbucket.Port) } - if storageEnabled { + if storageEnabled && utils.Config.Storage.S3Protocol != nil && utils.Config.Storage.S3Protocol.Enabled { values[c.StorageS3URL] = utils.GetApiUrl("/storage/v1/s3") values[c.StorageS3AccessKeyId] = utils.Config.Storage.S3Credentials.AccessKeyId values[c.StorageS3SecretAccessKey] = utils.Config.Storage.S3Credentials.SecretAccessKey @@ -210,44 +218,159 @@ func printStatus(names CustomName, format string, w io.Writer, exclude ...string } func PrettyPrint(w io.Writer, exclude ...string) { - names := CustomName{ - ApiURL: " " + utils.Aqua("API URL"), - GraphqlURL: " " + utils.Aqua("GraphQL URL"), - StorageS3URL: " " + utils.Aqua("S3 Storage URL"), - McpURL: " " + utils.Aqua("MCP URL"), - DbURL: " " + utils.Aqua("Database URL"), - StudioURL: " " + utils.Aqua("Studio URL"), - InbucketURL: " " + utils.Aqua("Inbucket URL"), - MailpitURL: " " + utils.Aqua("Mailpit URL"), - PublishableKey: " " + utils.Aqua("Publishable key"), - SecretKey: " " + utils.Aqua("Secret key"), - JWTSecret: " " + utils.Aqua("JWT secret"), - AnonKey: " " + utils.Aqua("anon key"), - ServiceRoleKey: "" + utils.Aqua("service_role key"), - StorageS3AccessKeyId: " " + utils.Aqua("S3 Access Key"), - StorageS3SecretAccessKey: " " + utils.Aqua("S3 Secret Key"), - StorageS3Region: " " + utils.Aqua("S3 Region"), + logger := utils.GetDebugLogger() + + names := CustomName{} + if err := env.Unmarshal(env.EnvSet{}, &names); err != nil { + fmt.Fprintln(logger, err) } values := names.toValues(exclude...) 
- // Iterate through map in order of declared struct fields - t := reflect.TypeOf(names) - val := reflect.ValueOf(names) - for i := 0; i < val.NumField(); i++ { - k := val.Field(i).String() - if tag := t.Field(i).Tag.Get("env"); isDeprecated(tag) { + + groups := []OutputGroup{ + { + Name: "🛠️ Development Tools", + Items: []OutputItem{ + {Label: "Studio", Value: values[names.StudioURL], Type: Link}, + {Label: "Mailpit", Value: values[names.MailpitURL], Type: Link}, + {Label: "MCP", Value: values[names.McpURL], Type: Link}, + }, + }, + { + Name: "🌐 APIs", + Items: []OutputItem{ + {Label: "Project URL", Value: values[names.ApiURL], Type: Link}, + {Label: "REST", Value: values[names.RestURL], Type: Link}, + {Label: "GraphQL", Value: values[names.GraphqlURL], Type: Link}, + {Label: "Edge Functions", Value: values[names.FunctionsURL], Type: Link}, + }, + }, + { + Name: "🗄️ Database", + Items: []OutputItem{ + {Label: "URL", Value: values[names.DbURL], Type: Link}, + }, + }, + { + Name: "🔑 Authentication Keys", + Items: []OutputItem{ + {Label: "Publishable", Value: values[names.PublishableKey], Type: Key}, + {Label: "Secret", Value: values[names.SecretKey], Type: Key}, + }, + }, + { + Name: "📦 Storage (S3)", + Items: []OutputItem{ + {Label: "URL", Value: values[names.StorageS3URL], Type: Link}, + {Label: "Access Key", Value: values[names.StorageS3AccessKeyId], Type: Key}, + {Label: "Secret Key", Value: values[names.StorageS3SecretAccessKey], Type: Key}, + {Label: "Region", Value: values[names.StorageS3Region], Type: Text}, + }, + }, + } + + for _, group := range groups { + if err := group.printTable(w); err != nil { + fmt.Fprintln(logger, err) + } else { + fmt.Fprintln(w) + } + } +} + +type OutputType string + +const ( + Text OutputType = "text" + Link OutputType = "link" + Key OutputType = "key" +) + +type OutputItem struct { + Label string + Value string + Type OutputType +} + +type OutputGroup struct { + Name string + Items []OutputItem +} + +func (g *OutputGroup) printTable(w io.Writer) error { + table := tablewriter.NewTable(w, + // Rounded corners + tablewriter.WithSymbols(tw.NewSymbols(tw.StyleRounded)), + + // Table content formatting + tablewriter.WithConfig(tablewriter.Config{ + Header: tw.CellConfig{ + Formatting: tw.CellFormatting{ + AutoFormat: tw.Off, + MergeMode: tw.MergeHorizontal, + }, + Alignment: tw.CellAlignment{ + Global: tw.AlignLeft, + }, + Filter: tw.CellFilter{ + Global: func(s []string) []string { + for i := range s { + s[i] = utils.Bold(s[i]) + } + return s + }, + }, + }, + Row: tw.CellConfig{ + Alignment: tw.CellAlignment{ + Global: tw.AlignLeft, + }, + ColMaxWidths: tw.CellWidth{ + PerColumn: map[int]int{0: 16}, + }, + Filter: tw.CellFilter{ + PerColumn: []func(string) string{ + func(s string) string { + return utils.Green(s) + }, + }, + }, + }, + Behavior: tw.Behavior{ + Compact: tw.Compact{ + Merge: tw.On, + }, + }, + }), + + // Set title as header (merged across all columns) + tablewriter.WithHeader([]string{g.Name, g.Name}), + ) + + // Add data rows with values colored based on type + shouldRender := false + for _, row := range g.Items { + if row.Value == "" { continue } - if v, ok := values[k]; ok { - fmt.Fprintf(w, "%s: %s\n", k, v) + value := row.Value + switch row.Type { + case Link: + value = utils.Aqua(row.Value) + case Key: + value = utils.Yellow(row.Value) + } + if err := table.Append(row.Label, value); err != nil { + return errors.Errorf("failed to append row: %w", err) } + shouldRender = true } -} -func isDeprecated(tag string) bool { - for part := range 
strings.SplitSeq(tag, ",") { - if strings.EqualFold(part, "deprecated") { - return true + // Ensure at least one item in the group is non-empty + if shouldRender { + if err := table.Render(); err != nil { + return errors.Errorf("failed to render table: %w", err) } } - return false + + return nil } diff --git a/internal/utils/colors.go b/internal/utils/colors.go index ed710c4a2..f4f82652c 100644 --- a/internal/utils/colors.go +++ b/internal/utils/colors.go @@ -13,6 +13,10 @@ func Yellow(str string) string { return lipgloss.NewStyle().Foreground(lipgloss.Color("11")).Render(str) } +func Green(str string) string { + return lipgloss.NewStyle().Foreground(lipgloss.Color("10")).Render(str) +} + // For errors. func Red(str string) string { return lipgloss.NewStyle().Foreground(lipgloss.Color("9")).Render(str) diff --git a/internal/utils/tenant/database.go b/internal/utils/tenant/database.go index 1ae1ee6a7..ababf74af 100644 --- a/internal/utils/tenant/database.go +++ b/internal/utils/tenant/database.go @@ -10,17 +10,15 @@ import ( var errDatabaseVersion = errors.New("Database version not found.") func GetDatabaseVersion(ctx context.Context, projectRef string) (string, error) { - resp, err := utils.GetSupabase().V1ListAllProjectsWithResponse(ctx) + resp, err := utils.GetSupabase().V1GetProjectWithResponse(ctx, projectRef) if err != nil { - return "", errors.Errorf("failed to retrieve projects: %w", err) + return "", errors.Errorf("failed to retrieve project: %w", err) } if resp.JSON200 == nil { - return "", errors.New("Unexpected error retrieving projects: " + string(resp.Body)) + return "", errors.Errorf("unexpected retrieve project status %d: %s", resp.StatusCode(), string(resp.Body)) } - for _, project := range *resp.JSON200 { - if project.Id == projectRef && len(project.Database.Version) > 0 { - return project.Database.Version, nil - } + if len(resp.JSON200.Database.Version) > 0 { + return resp.JSON200.Database.Version, nil } return "", errors.New(errDatabaseVersion) } diff --git a/internal/utils/tenant/database_test.go b/internal/utils/tenant/database_test.go index d80ac5cba..5a042cd74 100644 --- a/internal/utils/tenant/database_test.go +++ b/internal/utils/tenant/database_test.go @@ -22,9 +22,9 @@ func TestGetDatabaseVersion(t *testing.T) { mockPostgres := api.V1ProjectWithDatabaseResponse{Id: projectRef} mockPostgres.Database.Version = "14.1.0.99" gock.New(utils.DefaultApiHost). - Get("/v1/projects"). + Get("/v1/projects/" + projectRef). Reply(http.StatusOK). - JSON([]api.V1ProjectWithDatabaseResponse{mockPostgres}) + JSON(mockPostgres) version, err := GetDatabaseVersion(context.Background(), projectRef) @@ -39,16 +39,13 @@ func TestGetDatabaseVersion(t *testing.T) { defer gock.OffAll() projectRef := apitest.RandomProjectRef() - mockPostgres := api.V1ProjectWithDatabaseResponse{Id: "different-project"} - mockPostgres.Database.Version = "14.1.0.99" gock.New(utils.DefaultApiHost). - Get("/v1/projects"). - Reply(http.StatusOK). - JSON([]api.V1ProjectWithDatabaseResponse{mockPostgres}) + Get("/v1/projects/" + projectRef). + Reply(http.StatusNotFound) version, err := GetDatabaseVersion(context.Background(), projectRef) - assert.ErrorIs(t, err, errDatabaseVersion) + assert.ErrorContains(t, err, "unexpected retrieve project status 404:") assert.Empty(t, version) assert.Empty(t, apitest.ListUnmatchedRequests()) }) @@ -60,11 +57,9 @@ func TestGetDatabaseVersion(t *testing.T) { defer gock.OffAll() projectRef := apitest.RandomProjectRef() gock.New(utils.DefaultApiHost). - Get("/v1/projects"). 
+ Get("/v1/projects/" + projectRef). Reply(http.StatusOK). - JSON([]api.V1ProjectWithDatabaseResponse{{ - Id: projectRef, - }}) + JSON(api.V1ProjectWithDatabaseResponse{Id: projectRef}) version, err := GetDatabaseVersion(context.Background(), projectRef) diff --git a/package.json b/package.json index 27f3470fd..60a47f336 100644 --- a/package.json +++ b/package.json @@ -34,7 +34,7 @@ }, { "name": "develop", - "channel": "beta" + "channel": "latest" } ], "plugins": [ diff --git a/pkg/api/types.gen.go b/pkg/api/types.gen.go index 5dd91424c..34029acbc 100644 --- a/pkg/api/types.gen.go +++ b/pkg/api/types.gen.go @@ -3324,23 +3324,23 @@ type StorageConfigResponse struct { UpstreamTarget StorageConfigResponseExternalUpstreamTarget `json:"upstreamTarget"` } `json:"external"` Features struct { - IcebergCatalog *struct { + IcebergCatalog struct { Enabled bool `json:"enabled"` MaxCatalogs int `json:"maxCatalogs"` MaxNamespaces int `json:"maxNamespaces"` MaxTables int `json:"maxTables"` - } `json:"icebergCatalog,omitempty"` + } `json:"icebergCatalog"` ImageTransformation struct { Enabled bool `json:"enabled"` } `json:"imageTransformation"` S3Protocol struct { Enabled bool `json:"enabled"` } `json:"s3Protocol"` - VectorBuckets *struct { + VectorBuckets struct { Enabled bool `json:"enabled"` MaxBuckets int `json:"maxBuckets"` MaxIndexes int `json:"maxIndexes"` - } `json:"vectorBuckets,omitempty"` + } `json:"vectorBuckets"` } `json:"features"` FileSizeLimit int64 `json:"fileSizeLimit"` MigrationVersion string `json:"migrationVersion"` @@ -3900,12 +3900,12 @@ type UpdateStorageConfigBody struct { MaxNamespaces int `json:"maxNamespaces"` MaxTables int `json:"maxTables"` } `json:"icebergCatalog,omitempty"` - ImageTransformation struct { + ImageTransformation *struct { Enabled bool `json:"enabled"` - } `json:"imageTransformation"` - S3Protocol struct { + } `json:"imageTransformation,omitempty"` + S3Protocol *struct { Enabled bool `json:"enabled"` - } `json:"s3Protocol"` + } `json:"s3Protocol,omitempty"` VectorBuckets *struct { Enabled bool `json:"enabled"` MaxBuckets int `json:"maxBuckets"` diff --git a/pkg/config/config.go b/pkg/config/config.go index 14733b162..3096f4d55 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -291,10 +291,16 @@ func (a *auth) Clone() auth { func (s *storage) Clone() storage { copy := *s copy.Buckets = maps.Clone(s.Buckets) + copy.AnalyticsBuckets.Buckets = maps.Clone(s.AnalyticsBuckets.Buckets) + copy.VectorBuckets.Buckets = maps.Clone(s.VectorBuckets.Buckets) if s.ImageTransformation != nil { img := *s.ImageTransformation copy.ImageTransformation = &img } + if s.S3Protocol != nil { + s3 := *s.S3Protocol + copy.S3Protocol = &s3 + } return copy } @@ -540,6 +546,15 @@ func (c *config) load(v *viper.Viper) error { }); err != nil { return errors.Errorf("failed to parse config: %w", err) } + // Manually parse config to map + c.Storage.AnalyticsBuckets.Buckets = map[string]struct{}{} + for key := range v.GetStringMap("storage.analytics.buckets") { + c.Storage.AnalyticsBuckets.Buckets[key] = struct{}{} + } + c.Storage.VectorBuckets.Buckets = map[string]struct{}{} + for key := range v.GetStringMap("storage.vector.buckets") { + c.Storage.VectorBuckets.Buckets[key] = struct{}{} + } // Convert keys to upper case: https://github.com/spf13/viper/issues/1014 secrets := make(SecretsConfig, len(c.EdgeRuntime.Secrets)) for k, v := range c.EdgeRuntime.Secrets { @@ -614,7 +629,7 @@ func (c *config) Load(path string, fsys fs.FS, overrides ...ConfigEditor) error } if version, err := 
fs.ReadFile(fsys, builder.StorageVersionPath); err == nil && len(version) > 0 { // Only replace image if local storage version is newer - if i := strings.IndexByte(Images.Storage, ':'); semver.Compare(string(version), Images.Storage[i+1:]) > 0 { + if i := strings.IndexByte(Images.Storage, ':'); semver.Compare(strings.TrimSpace(string(version)), Images.Storage[i+1:]) > 0 { c.Storage.Image = replaceImageTag(Images.Storage, string(version)) } } diff --git a/pkg/config/storage.go b/pkg/config/storage.go index a692d238e..7fdffb3a8 100644 --- a/pkg/config/storage.go +++ b/pkg/config/storage.go @@ -14,14 +14,36 @@ type ( ImgProxyImage string `toml:"-"` FileSizeLimit sizeInBytes `toml:"file_size_limit"` ImageTransformation *imageTransformation `toml:"image_transformation"` + S3Protocol *s3Protocol `toml:"s3_protocol"` S3Credentials storageS3Credentials `toml:"-"` Buckets BucketConfig `toml:"buckets"` + AnalyticsBuckets analyticsBuckets `toml:"analytics"` + VectorBuckets vectorBuckets `toml:"vector"` } imageTransformation struct { Enabled bool `toml:"enabled"` } + analyticsBuckets struct { + Enabled bool `toml:"enabled"` + MaxNamespaces uint `toml:"max_namespaces"` + MaxTables uint `toml:"max_tables"` + MaxCatalogs uint `toml:"max_catalogs"` + Buckets map[string]struct{} `toml:"buckets"` + } + + vectorBuckets struct { + Enabled bool `toml:"enabled"` + MaxBuckets uint `toml:"max_buckets"` + MaxIndexes uint `toml:"max_indexes"` + Buckets map[string]struct{} `toml:"buckets"` + } + + s3Protocol struct { + Enabled bool `toml:"enabled"` + } + storageS3Credentials struct { AccessKeyId string `toml:"-"` SecretAccessKey string `toml:"-"` @@ -41,47 +63,94 @@ type ( func (s *storage) ToUpdateStorageConfigBody() v1API.UpdateStorageConfigBody { body := v1API.UpdateStorageConfigBody{ FileSizeLimit: cast.Ptr(int64(s.FileSizeLimit)), - } - // When local config is not set, we assume platform defaults should not change - if s.ImageTransformation != nil { - body.Features = &struct { + Features: &struct { IcebergCatalog *struct { Enabled bool `json:"enabled"` MaxCatalogs int `json:"maxCatalogs"` MaxNamespaces int `json:"maxNamespaces"` MaxTables int `json:"maxTables"` } `json:"icebergCatalog,omitempty"` - ImageTransformation struct { + ImageTransformation *struct { Enabled bool `json:"enabled"` - } `json:"imageTransformation"` - S3Protocol struct { + } `json:"imageTransformation,omitempty"` + S3Protocol *struct { Enabled bool `json:"enabled"` - } `json:"s3Protocol"` + } `json:"s3Protocol,omitempty"` VectorBuckets *struct { Enabled bool `json:"enabled"` MaxBuckets int `json:"maxBuckets"` MaxIndexes int `json:"maxIndexes"` } `json:"vectorBuckets,omitempty"` - }{} - body.Features.ImageTransformation.Enabled = s.ImageTransformation.Enabled + }{}, + } + // When local config is not set, we assume platform defaults should not change + if s.ImageTransformation != nil { + body.Features.ImageTransformation = &struct { + Enabled bool `json:"enabled"` + }{ + Enabled: s.ImageTransformation.Enabled, + } + } + // Disabling analytics and vector buckets means leaving platform values unchanged + if s.AnalyticsBuckets.Enabled { + body.Features.IcebergCatalog = &struct { + Enabled bool `json:"enabled"` + MaxCatalogs int `json:"maxCatalogs"` + MaxNamespaces int `json:"maxNamespaces"` + MaxTables int `json:"maxTables"` + }{ + Enabled: true, + MaxNamespaces: cast.UintToInt(s.AnalyticsBuckets.MaxNamespaces), + MaxTables: cast.UintToInt(s.AnalyticsBuckets.MaxTables), + MaxCatalogs: cast.UintToInt(s.AnalyticsBuckets.MaxCatalogs), + } + } + if 
s.VectorBuckets.Enabled { + body.Features.VectorBuckets = &struct { + Enabled bool `json:"enabled"` + MaxBuckets int `json:"maxBuckets"` + MaxIndexes int `json:"maxIndexes"` + }{ + Enabled: true, + MaxBuckets: cast.UintToInt(s.VectorBuckets.MaxBuckets), + MaxIndexes: cast.UintToInt(s.VectorBuckets.MaxIndexes), + } + } + if s.S3Protocol != nil { + body.Features.S3Protocol = &struct { + Enabled bool `json:"enabled"` + }{ + Enabled: s.S3Protocol.Enabled, + } } return body } func (s *storage) FromRemoteStorageConfig(remoteConfig v1API.StorageConfigResponse) { s.FileSizeLimit = sizeInBytes(remoteConfig.FileSizeLimit) + s.TargetMigration = remoteConfig.MigrationVersion // When local config is not set, we assume platform defaults should not change if s.ImageTransformation != nil { s.ImageTransformation.Enabled = remoteConfig.Features.ImageTransformation.Enabled } + if s.AnalyticsBuckets.Enabled { + s.AnalyticsBuckets.Enabled = remoteConfig.Features.IcebergCatalog.Enabled + s.AnalyticsBuckets.MaxNamespaces = cast.IntToUint(remoteConfig.Features.IcebergCatalog.MaxNamespaces) + s.AnalyticsBuckets.MaxTables = cast.IntToUint(remoteConfig.Features.IcebergCatalog.MaxTables) + s.AnalyticsBuckets.MaxCatalogs = cast.IntToUint(remoteConfig.Features.IcebergCatalog.MaxCatalogs) + } + if s.VectorBuckets.Enabled { + s.VectorBuckets.Enabled = remoteConfig.Features.VectorBuckets.Enabled + s.VectorBuckets.MaxBuckets = cast.IntToUint(remoteConfig.Features.VectorBuckets.MaxBuckets) + s.VectorBuckets.MaxIndexes = cast.IntToUint(remoteConfig.Features.VectorBuckets.MaxIndexes) + } + if s.S3Protocol != nil { + s.S3Protocol.Enabled = remoteConfig.Features.S3Protocol.Enabled + } } func (s *storage) DiffWithRemote(remoteConfig v1API.StorageConfigResponse) ([]byte, error) { copy := s.Clone() - if s.ImageTransformation != nil { - img := *s.ImageTransformation - copy.ImageTransformation = &img - } // Convert the config values into easily comparable remoteConfig values currentValue, err := ToTomlBytes(copy) if err != nil { diff --git a/pkg/config/templates/Dockerfile b/pkg/config/templates/Dockerfile index 52fbd6229..c64d7578d 100644 --- a/pkg/config/templates/Dockerfile +++ b/pkg/config/templates/Dockerfile @@ -1,19 +1,19 @@ # Exposed for updates by .github/dependabot.yml -FROM supabase/postgres:17.6.1.054 AS pg +FROM supabase/postgres:17.6.1.058 AS pg # Append to ServiceImages when adding new dependencies below FROM library/kong:2.8.1 AS kong FROM axllent/mailpit:v1.22.3 AS mailpit -FROM postgrest/postgrest:v13.0.7 AS postgrest +FROM postgrest/postgrest:v14.1 AS postgrest FROM supabase/postgres-meta:v0.93.1 AS pgmeta -FROM supabase/studio:2025.11.25-sha-8de52c4 AS studio +FROM supabase/studio:2025.12.01-sha-4ad48b7 AS studio FROM darthsim/imgproxy:v3.8.0 AS imgproxy -FROM supabase/edge-runtime:v1.69.25 AS edgeruntime +FROM supabase/edge-runtime:v1.69.27 AS edgeruntime FROM timberio/vector:0.28.1-alpine AS vector FROM supabase/supavisor:2.7.4 AS supavisor FROM supabase/gotrue:v2.183.0 AS gotrue -FROM supabase/realtime:v2.65.3 AS realtime -FROM supabase/storage-api:v1.32.0 AS storage -FROM supabase/logflare:1.26.13 AS logflare +FROM supabase/realtime:v2.66.2 AS realtime +FROM supabase/storage-api:v1.32.1 AS storage +FROM supabase/logflare:1.26.16 AS logflare # Append to JobImages when adding new dependencies below FROM supabase/pgadmin-schema-diff:cli-0.0.5 AS differ FROM supabase/migra:3.0.1663481299 AS migra diff --git a/pkg/config/templates/config.toml b/pkg/config/templates/config.toml index 134be6085..3fb0d0b54 100644 --- 
a/pkg/config/templates/config.toml +++ b/pkg/config/templates/config.toml @@ -105,10 +105,6 @@ enabled = true # The maximum file size allowed (e.g. "5MB", "500KB"). file_size_limit = "50MiB" -# Image transformation API is available to Supabase Pro plan. -# [storage.image_transformation] -# enabled = true - # Uncomment to configure local storage buckets # [storage.buckets.images] # public = false @@ -116,6 +112,35 @@ file_size_limit = "50MiB" # allowed_mime_types = ["image/png", "image/jpeg"] # objects_path = "./images" +# Uncomment to allow connections via S3 compatible clients +# [storage.s3_protocol] +# enabled = true + +# Image transformation API is available to Supabase Pro plan. +# [storage.image_transformation] +# enabled = true + +# Store analytical data in S3 for running ETL jobs over Iceberg Catalog +# This feature is only available on the hosted platform. +[storage.analytics] +enabled = false +max_namespaces = 5 +max_tables = 10 +max_catalogs = 2 + +# Analytics Buckets is available to Supabase Pro plan. +# [storage.analytics.buckets.my-warehouse] + +# Store vector embeddings in S3 for large and durable datasets +# This feature is only available on the hosted platform. +[storage.vector] +enabled = false +max_buckets = 10 +max_indexes = 5 + +# Vector Buckets is available to Supabase Pro plan. +# [storage.vector.buckets.documents-openai] + [auth] enabled = true # The base URL of your website. Used as an allow-list for redirects and for constructing URLs used diff --git a/pkg/config/testdata/config.toml b/pkg/config/testdata/config.toml index 1b68b6d73..0b4974321 100644 --- a/pkg/config/testdata/config.toml +++ b/pkg/config/testdata/config.toml @@ -105,10 +105,6 @@ enabled = true # The maximum file size allowed (e.g. "5MB", "500KB"). file_size_limit = "50MiB" -# Image transformation API is available to Supabase Pro plan. -[storage.image_transformation] -enabled = true - # Uncomment to configure local storage buckets [storage.buckets.images] public = false @@ -116,6 +112,33 @@ file_size_limit = "50MiB" allowed_mime_types = ["image/png", "image/jpeg"] objects_path = "./images" +# Uncomment to allow connections via S3 compatible clients +[storage.s3_protocol] +enabled = true + +# Image transformation API is available to Supabase Pro plan. +[storage.image_transformation] +enabled = true + +# Store analytical data in S3 for running ETL jobs over Iceberg Catalog +[storage.analytics] +enabled = true +max_namespaces = 5 +max_tables = 10 +max_catalogs = 2 + +# Analytics Buckets is available to Supabase Pro plan. +[storage.analytics.buckets.my-warehouse] + +# Store vector embeddings in S3 for large and durable datasets +[storage.vector] +enabled = true +max_buckets = 10 +max_indexes = 5 + +# Vector Buckets is available to Supabase Pro plan. +# [storage.vector.buckets.documents-openai] + [auth] enabled = true # The base URL of your website. 
Used as an allow-list for redirects and for constructing URLs used diff --git a/pkg/config/updater_test.go b/pkg/config/updater_test.go index 7c6deb410..5759c9ba8 100644 --- a/pkg/config/updater_test.go +++ b/pkg/config/updater_test.go @@ -7,6 +7,7 @@ import ( "github.com/h2non/gock" "github.com/oapi-codegen/nullable" + openapi_types "github.com/oapi-codegen/runtime/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" v1API "github.com/supabase/cli/pkg/api" @@ -222,27 +223,9 @@ func TestUpdateStorageConfig(t *testing.T) { defer gock.Off() mockStorage := v1API.StorageConfigResponse{ FileSizeLimit: 100, - Features: struct { - IcebergCatalog *struct { - Enabled bool `json:"enabled"` - MaxCatalogs int `json:"maxCatalogs"` - MaxNamespaces int `json:"maxNamespaces"` - MaxTables int `json:"maxTables"` - } `json:"icebergCatalog,omitempty"` - ImageTransformation struct { - Enabled bool `json:"enabled"` - } `json:"imageTransformation"` - S3Protocol struct { - Enabled bool `json:"enabled"` - } `json:"s3Protocol"` - VectorBuckets *struct { - Enabled bool `json:"enabled"` - MaxBuckets int `json:"maxBuckets"` - MaxIndexes int `json:"maxIndexes"` - } `json:"vectorBuckets,omitempty"` - }{}, } mockStorage.Features.ImageTransformation.Enabled = true + mockStorage.Features.S3Protocol.Enabled = true gock.New(server). Get("/v1/projects/test-project/config/storage"). Reply(http.StatusOK). @@ -313,11 +296,18 @@ func TestUpdateRemoteConfig(t *testing.T) { JSON(v1API.PostgresConfigResponse{ MaxConnections: cast.Ptr(cast.UintToInt(100)), }) + // Network config + gock.New(server). + Get("/v1/projects/test-project/network-restrictions"). + Reply(http.StatusOK). + JSON(v1API.V1GetNetworkRestrictionsResponse{}) // Auth config gock.New(server). Get("/v1/projects/test-project/config/auth"). Reply(http.StatusOK). - JSON(v1API.AuthConfigResponse{}) + JSON(v1API.AuthConfigResponse{ + SmtpAdminEmail: nullable.NewNullableWithValue(openapi_types.Email("abc@example.com")), + }) gock.New(server). Patch("/v1/projects/test-project/config/auth"). 
Reply(http.StatusOK) @@ -357,6 +347,9 @@ func TestUpdateRemoteConfig(t *testing.T) { ImageTransformation: &imageTransformation{ Enabled: true, }, + S3Protocol: &s3Protocol{ + Enabled: true, + }, }, Experimental: experimental{ Webhooks: &webhooks{ diff --git a/pkg/storage/analytics.go b/pkg/storage/analytics.go new file mode 100644 index 000000000..12f2abe01 --- /dev/null +++ b/pkg/storage/analytics.go @@ -0,0 +1,68 @@ +package storage + +import ( + "context" + "fmt" + "net/http" + "os" + + "github.com/supabase/cli/pkg/fetcher" +) + +type AnalyticsBucketResponse struct { + Id string `json:"id"` // "test" + Name string `json:"name"` // "test" + CreatedAt string `json:"created_at"` // "2023-10-13T17:48:58.491Z" + UpdatedAt string `json:"updated_at"` // "2023-10-13T17:48:58.491Z" +} + +type CreateAnalyticsBucketRequest struct { + BucketName string `json:"bucketName"` +} + +func (s *StorageAPI) UpsertAnalyticsBuckets(ctx context.Context, bucketConfig map[string]struct{}, filter ...func(string) bool) error { + resp, err := s.Send(ctx, http.MethodGet, "/storage/v1/iceberg/bucket", nil) + if err != nil { + return err + } + buckets, err := fetcher.ParseJSON[[]AnalyticsBucketResponse](resp.Body) + if err != nil { + return err + } + var toDelete []string + exists := make(map[string]struct{}, len(buckets)) + for _, b := range buckets { + exists[b.Name] = struct{}{} + if _, ok := bucketConfig[b.Name]; !ok { + toDelete = append(toDelete, b.Name) + } + } + for name := range bucketConfig { + if _, ok := exists[name]; ok { + fmt.Fprintln(os.Stderr, "Bucket already exists:", name) + continue + } + fmt.Fprintln(os.Stderr, "Creating analytics bucket:", name) + body := CreateAnalyticsBucketRequest{BucketName: name} + if resp, err := s.Send(ctx, http.MethodPost, "/storage/v1/iceberg/bucket", body); err != nil { + return err + } else if err := resp.Body.Close(); err != nil { + fmt.Fprintln(os.Stderr, err) + } + } +OUTER: + for _, name := range toDelete { + for _, keep := range filter { + if !keep(name) { + continue OUTER + } + } + fmt.Fprintln(os.Stderr, "Pruning analytics bucket:", name) + if resp, err := s.Send(ctx, http.MethodDelete, "/storage/v1/iceberg/bucket/"+name, nil); err != nil { + return err + } else if err := resp.Body.Close(); err != nil { + fmt.Fprintln(os.Stderr, err) + } + } + return nil +} diff --git a/pkg/storage/vector.go b/pkg/storage/vector.go new file mode 100644 index 000000000..7218f7b28 --- /dev/null +++ b/pkg/storage/vector.go @@ -0,0 +1,81 @@ +package storage + +import ( + "context" + "fmt" + "net/http" + "os" + + "github.com/supabase/cli/pkg/fetcher" +) + +type VectorBucket struct { + VectorBucketName string `json:"vectorBucketName"` + CreationTime uint64 `json:"creationTime"` +} + +type ListVectorBucketsResponse struct { + VectorBuckets []VectorBucket `json:"vectorBuckets"` +} + +type ListVectorBucketsRequest struct { + MaxResults uint64 `json:"maxResults,omitempty"` + NextToken string `json:"nextToken,omitempty"` + Prefix string `json:"prefix,omitempty"` +} + +type CreateVectorBucketRequest struct { + VectorBucketName string `json:"vectorBucketName"` +} + +type DeleteVectorBucketRequest struct { + VectorBucketName string `json:"vectorBucketName"` +} + +func (s *StorageAPI) UpsertVectorBuckets(ctx context.Context, bucketConfig map[string]struct{}, filter ...func(string) bool) error { + resp, err := s.Send(ctx, http.MethodPost, "/storage/v1/vector/ListVectorBuckets", ListVectorBucketsRequest{}) + if err != nil { + return err + } + result, err := 
fetcher.ParseJSON[ListVectorBucketsResponse](resp.Body) + if err != nil { + return err + } + var toDelete []string + exists := make(map[string]struct{}, len(result.VectorBuckets)) + for _, b := range result.VectorBuckets { + exists[b.VectorBucketName] = struct{}{} + if _, ok := bucketConfig[b.VectorBucketName]; !ok { + toDelete = append(toDelete, b.VectorBucketName) + } + } + for name := range bucketConfig { + if _, ok := exists[name]; ok { + fmt.Fprintln(os.Stderr, "Bucket already exists:", name) + continue + } + fmt.Fprintln(os.Stderr, "Creating vector bucket:", name) + body := CreateVectorBucketRequest{VectorBucketName: name} + if resp, err := s.Send(ctx, http.MethodPost, "/storage/v1/vector/CreateVectorBucket", body); err != nil { + return err + } else if err := resp.Body.Close(); err != nil { + fmt.Fprintln(os.Stderr, err) + } + } +OUTER: + for _, name := range toDelete { + for _, keep := range filter { + if !keep(name) { + continue OUTER + } + } + fmt.Fprintln(os.Stderr, "Pruning vector bucket:", name) + body := DeleteVectorBucketRequest{VectorBucketName: name} + if resp, err := s.Send(ctx, http.MethodPost, "/storage/v1/vector/DeleteVectorBucket", body); err != nil { + return err + } else if err := resp.Body.Close(); err != nil { + fmt.Fprintln(os.Stderr, err) + } + } + return nil +}
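
Both new files (`pkg/storage/analytics.go` and `pkg/storage/vector.go`) follow the same reconcile shape: list the remote buckets, create the ones declared in config but missing remotely, and prune remote extras only when every filter callback approves. A minimal sketch of that pattern in isolation — the names `reconcile`, `desired`, and `remote` are illustrative and not part of the CLI's API:

```go
package main

import "fmt"

// reconcile diffs a desired bucket set against the remote list: names in
// desired but not remote are created; remote names absent from desired are
// pruned only if every filter approves (mirroring the OUTER loops above).
func reconcile(desired map[string]struct{}, remote []string, filters ...func(string) bool) (create, prune []string) {
	exists := make(map[string]struct{}, len(remote))
	for _, name := range remote {
		exists[name] = struct{}{}
		if _, ok := desired[name]; !ok {
			prune = append(prune, name)
		}
	}
	for name := range desired {
		if _, ok := exists[name]; !ok {
			create = append(create, name)
		}
	}
	// Keep only the prune candidates that every filter approves.
	kept := prune[:0]
OUTER:
	for _, name := range prune {
		for _, keep := range filters {
			if !keep(name) {
				continue OUTER
			}
		}
		kept = append(kept, name)
	}
	return create, kept
}

func main() {
	desired := map[string]struct{}{"docs": {}, "embeddings": {}}
	remote := []string{"docs", "stale"}
	create, prune := reconcile(desired, remote, func(string) bool { return true })
	fmt.Println(create, prune) // [embeddings] [stale]
}
```

The variadic filter mirrors how `internal/seed/buckets/buckets.go` passes an interactive prune callback that defaults to `false` when the prompt fails, so non-interactive runs leave unknown remote buckets untouched.
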