mirror of https://github.com/go-gitea/gitea.git
Azure blob storage support (#30995)
This PR implements object storage (LFS, Packages, Attachments, etc.) for Azure Blob Storage. It depends on the official Azure Go SDK and supports both the Azure Blob Storage cloud service and the Azurite mock server. Replaces #25458. Fixes #22527.
- [x] CI tests (the MSSQL integration tests are now based on azureblob)
- [x] Integration test
- [x] Unit test
- [x] CLI migrate storage
- [x] Documentation for the configuration added
TODO (other PRs):
- [ ] Improve the performance of `blob download`.
Co-authored-by: yp05327 <576951401@qq.com>
This commit is contained in: parent 015efcd8bf, commit fb7b743bd0
@@ -26,7 +26,8 @@
         "ms-azuretools.vscode-docker",
         "vitest.explorer",
         "qwtel.sqlite-viewer",
-        "GitHub.vscode-pull-request-github"
+        "GitHub.vscode-pull-request-github",
+        "Azurite.azurite"
       ]
     }
   },
@@ -119,6 +119,10 @@ jobs:
           MINIO_SECRET_KEY: 12345678
         ports:
           - "9000:9000"
+      devstoreaccount1.azurite.local: # https://github.com/Azure/Azurite/issues/1583
+        image: mcr.microsoft.com/azure-storage/azurite:latest
+        ports:
+          - 10000:10000
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-go@v5
@@ -126,7 +130,7 @@ jobs:
           go-version-file: go.mod
           check-latest: true
       - name: Add hosts to /etc/hosts
-        run: '[ -e "/.dockerenv" ] || [ -e "/run/.containerenv" ] || echo "127.0.0.1 mysql elasticsearch meilisearch smtpimap" | sudo tee -a /etc/hosts'
+        run: '[ -e "/.dockerenv" ] || [ -e "/run/.containerenv" ] || echo "127.0.0.1 minio devstoreaccount1.azurite.local mysql elasticsearch meilisearch smtpimap" | sudo tee -a /etc/hosts'
       - run: make deps-backend
       - run: make backend
         env:
@@ -204,6 +208,10 @@ jobs:
           SA_PASSWORD: MwantsaSecurePassword1
         ports:
           - "1433:1433"
+      devstoreaccount1.azurite.local: # https://github.com/Azure/Azurite/issues/1583
+        image: mcr.microsoft.com/azure-storage/azurite:latest
+        ports:
+          - 10000:10000
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-go@v5
@@ -211,7 +219,7 @@ jobs:
           go-version-file: go.mod
           check-latest: true
       - name: Add hosts to /etc/hosts
-        run: '[ -e "/.dockerenv" ] || [ -e "/run/.containerenv" ] || echo "127.0.0.1 mssql" | sudo tee -a /etc/hosts'
+        run: '[ -e "/.dockerenv" ] || [ -e "/run/.containerenv" ] || echo "127.0.0.1 mssql devstoreaccount1.azurite.local" | sudo tee -a /etc/hosts'
       - run: make deps-backend
       - run: make backend
         env:
File diff suppressed because one or more lines are too long
@@ -40,7 +40,7 @@ var CmdMigrateStorage = &cli.Command{
             Name:    "storage",
             Aliases: []string{"s"},
             Value:   "",
-            Usage:   "New storage type: local (default) or minio",
+            Usage:   "New storage type: local (default), minio or azureblob",
         },
         &cli.StringFlag{
             Name: "path",
@@ -48,6 +48,7 @@ var CmdMigrateStorage = &cli.Command{
             Value: "",
             Usage: "New storage placement if store is local (leave blank for default)",
         },
+        // Minio Storage special configurations
         &cli.StringFlag{
             Name:  "minio-endpoint",
             Value: "",
@@ -96,6 +97,32 @@ var CmdMigrateStorage = &cli.Command{
             Value: "",
             Usage: "Minio bucket lookup type",
         },
+        // Azure Blob Storage special configurations
+        &cli.StringFlag{
+            Name:  "azureblob-endpoint",
+            Value: "",
+            Usage: "Azure Blob storage endpoint",
+        },
+        &cli.StringFlag{
+            Name:  "azureblob-account-name",
+            Value: "",
+            Usage: "Azure Blob storage account name",
+        },
+        &cli.StringFlag{
+            Name:  "azureblob-account-key",
+            Value: "",
+            Usage: "Azure Blob storage account key",
+        },
+        &cli.StringFlag{
+            Name:  "azureblob-container",
+            Value: "",
+            Usage: "Azure Blob storage container",
+        },
+        &cli.StringFlag{
+            Name:  "azureblob-base-path",
+            Value: "",
+            Usage: "Azure Blob storage base path",
+        },
     },
 }

@@ -228,6 +255,18 @@ func runMigrateStorage(ctx *cli.Context) error {
                 BucketLookUpType: ctx.String("minio-bucket-lookup-type"),
             },
         })
+    case string(setting.AzureBlobStorageType):
+        dstStorage, err = storage.NewAzureBlobStorage(
+            stdCtx,
+            &setting.Storage{
+                AzureBlobConfig: setting.AzureBlobStorageConfig{
+                    Endpoint:    ctx.String("azureblob-endpoint"),
+                    AccountName: ctx.String("azureblob-account-name"),
+                    AccountKey:  ctx.String("azureblob-account-key"),
+                    Container:   ctx.String("azureblob-container"),
+                    BasePath:    ctx.String("azureblob-base-path"),
+                },
+            })
     default:
         return fmt.Errorf("unsupported storage type: %s", ctx.String("storage"))
     }
@@ -1862,7 +1862,7 @@ LEVEL = Info
 ;STORAGE_TYPE = local
 ;;
 ;; Allows the storage driver to redirect to authenticated URLs to serve files directly
-;; Currently, only `minio` is supported.
+;; Currently, only `minio` and `azureblob` is supported.
 ;SERVE_DIRECT = false
 ;;
 ;; Path for attachments. Defaults to `attachments`. Only available when STORAGE_TYPE is `local`
@@ -1901,6 +1901,21 @@ LEVEL = Info
 ;;
 ;; Minio bucket lookup method defaults to auto mode; set it to `dns` for virtual host style or `path` for path style, only available when STORAGE_TYPE is `minio`
 ;MINIO_BUCKET_LOOKUP_TYPE = auto
+;; Azure Blob endpoint to connect only available when STORAGE_TYPE is `azureblob`,
+;; e.g. https://accountname.blob.core.windows.net or http://127.0.0.1:10000/devstoreaccount1
+;AZURE_BLOB_ENDPOINT =
+;;
+;; Azure Blob account name to connect only available when STORAGE_TYPE is `azureblob`
+;AZURE_BLOB_ACCOUNT_NAME =
+;;
+;; Azure Blob account key to connect only available when STORAGE_TYPE is `azureblob`
+;AZURE_BLOB_ACCOUNT_KEY =
+;;
+;; Azure Blob container to store the attachments only available when STORAGE_TYPE is `azureblob`
+;AZURE_BLOB_CONTAINER = gitea
+;;
+;; override the azure blob base path if storage type is azureblob
+;AZURE_BLOB_BASE_PATH = attachments/

 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -2460,6 +2475,11 @@ LEVEL = Info
 ;STORAGE_TYPE = local
 ;; override the minio base path if storage type is minio
 ;MINIO_BASE_PATH = packages/
+;; override the azure blob base path if storage type is azureblob
+;AZURE_BLOB_BASE_PATH = packages/
+;; Allows the storage driver to redirect to authenticated URLs to serve files directly
+;; Currently, only `minio` and `azureblob` is supported.
+;SERVE_DIRECT = false
 ;;
 ;; Path for chunked uploads. Defaults to APP_DATA_PATH + `tmp/package-upload`
 ;CHUNKED_UPLOAD_PATH = tmp/package-upload
@@ -2533,6 +2553,8 @@ LEVEL = Info
 ;;
 ;; override the minio base path if storage type is minio
 ;MINIO_BASE_PATH = repo-archive/
+;; override the azure blob base path if storage type is azureblob
+;AZURE_BLOB_BASE_PATH = repo-archive/

 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -2554,8 +2576,15 @@ LEVEL = Info
 ;; Where your lfs files reside, default is data/lfs.
 ;PATH = data/lfs
 ;;
+;; Allows the storage driver to redirect to authenticated URLs to serve files directly
+;; Currently, only `minio` and `azureblob` is supported.
+;SERVE_DIRECT = false
+;;
 ;; override the minio base path if storage type is minio
 ;MINIO_BASE_PATH = lfs/
+;;
+;; override the azure blob base path if storage type is azureblob
+;AZURE_BLOB_BASE_PATH = lfs/

 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
@@ -2570,7 +2599,7 @@ LEVEL = Info
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;; customize storage
-;[storage.my_minio]
+;[storage.minio]
 ;STORAGE_TYPE = minio
 ;;
 ;; Minio endpoint to connect only available when STORAGE_TYPE is `minio`
@@ -2600,6 +2629,22 @@ LEVEL = Info
 ;; Minio bucket lookup method defaults to auto mode; set it to `dns` for virtual host style or `path` for path style, only available when STORAGE_TYPE is `minio`
 ;MINIO_BUCKET_LOOKUP_TYPE = auto
+
+;[storage.azureblob]
+;STORAGE_TYPE = azureblob
+;;
+;; Azure Blob endpoint to connect only available when STORAGE_TYPE is `azureblob`,
+;; e.g. https://accountname.blob.core.windows.net or http://127.0.0.1:10000/devstoreaccount1
+;AZURE_BLOB_ENDPOINT =
+;;
+;; Azure Blob account name to connect only available when STORAGE_TYPE is `azureblob`
+;AZURE_BLOB_ACCOUNT_NAME =
+;;
+;; Azure Blob account key to connect only available when STORAGE_TYPE is `azureblob`
+;AZURE_BLOB_ACCOUNT_KEY =
+;;
+;; Azure Blob container to store the attachments only available when STORAGE_TYPE is `azureblob`
+;AZURE_BLOB_CONTAINER = gitea

 ;[proxy]
 ;; Enable the proxy, all requests to external via HTTP will be affected
 ;PROXY_ENABLED = false
@@ -1287,7 +1287,7 @@ is `data/lfs` and the default of `MINIO_BASE_PATH` is `lfs/`.

 Default storage configuration for attachments, lfs, avatars, repo-avatars, repo-archive, packages, actions_log, actions_artifact.

-- `STORAGE_TYPE`: **local**: Storage type, `local` for local disk or `minio` for s3 compatible object storage service.
+- `STORAGE_TYPE`: **local**: Storage type, `local` for local disk, `minio` for s3 compatible object storage service, `azureblob` for azure blob storage service.
 - `SERVE_DIRECT`: **false**: Allows the storage driver to redirect to authenticated URLs to serve files directly. Currently, only Minio/S3 is supported via signed URLs, local does nothing.
 - `MINIO_ENDPOINT`: **localhost:9000**: Minio endpoint to connect only available when `STORAGE_TYPE` is `minio`
 - `MINIO_ACCESS_KEY_ID`: Minio accessKeyID to connect only available when STORAGE_TYPE is `minio`. If not provided and STORAGE_TYPE is `minio`, will search for credentials in known environment variables (MINIO_ACCESS_KEY_ID, AWS_ACCESS_KEY_ID), credentials files (~/.mc/config.json, ~/.aws/credentials), and EC2 instance metadata.
@@ -1298,6 +1298,12 @@ Default storage configuration for attachments, lfs, avatars, repo-avatars, repo-
 - `MINIO_INSECURE_SKIP_VERIFY`: **false**: Minio skip SSL verification available when STORAGE_TYPE is `minio`
 - `MINIO_BUCKET_LOOKUP_TYPE`: **auto**: Minio bucket lookup method defaults to auto mode; set it to `dns` for virtual host style or `path` for path style, only available when STORAGE_TYPE is `minio`
+- `AZURE_BLOB_ENDPOINT`: **_empty_**: Azure Blob endpoint to connect only available when STORAGE_TYPE is `azureblob`,
+  e.g. https://accountname.blob.core.windows.net or http://127.0.0.1:10000/devstoreaccount1
+- `AZURE_BLOB_ACCOUNT_NAME`: **_empty_**: Azure Blob account name to connect only available when STORAGE_TYPE is `azureblob`
+- `AZURE_BLOB_ACCOUNT_KEY`: **_empty_**: Azure Blob account key to connect only available when STORAGE_TYPE is `azureblob`
+- `AZURE_BLOB_CONTAINER`: **gitea**: Azure Blob container to store the data only available when STORAGE_TYPE is `azureblob`

 The recommended storage configuration for minio like below:

 ```ini
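As a quick orientation for the settings documented above, here is a minimal, hypothetical Go sketch of the struct those `AZURE_BLOB_*` keys map onto (the types and `ToShadowCopy` behaviour come from this PR's `modules/setting/storage.go`; the endpoint, account values and base path are placeholders, not recommended values):

```go
package main

import (
	"fmt"

	"code.gitea.io/gitea/modules/setting"
)

func main() {
	// Rough Go equivalent of:
	//   [storage]
	//   STORAGE_TYPE = azureblob
	//   AZURE_BLOB_ENDPOINT = https://accountname.blob.core.windows.net
	//   AZURE_BLOB_ACCOUNT_NAME = accountname
	//   AZURE_BLOB_ACCOUNT_KEY = <key>
	//   AZURE_BLOB_CONTAINER = gitea
	cfg := setting.Storage{
		Type: setting.AzureBlobStorageType,
		AzureBlobConfig: setting.AzureBlobStorageConfig{
			Endpoint:    "https://accountname.blob.core.windows.net", // placeholder
			AccountName: "accountname",                               // placeholder
			AccountKey:  "<key>",                                     // placeholder
			Container:   "gitea",
			BasePath:    "attachments/", // per-subsystem default, see getStorageForAzureBlob
		},
	}

	// ToShadowCopy (extended by this PR) masks the account name and key
	// before the configuration is logged or displayed.
	fmt.Printf("%+v\n", cfg.ToShadowCopy().AzureBlobConfig)
}
```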
@@ -1208,7 +1208,7 @@ ALLOW_DATA_URI_IMAGES = true

 默认的附件、lfs、头像、仓库头像、仓库归档、软件包、操作日志、操作艺术品的存储配置。

-- `STORAGE_TYPE`:**local**:存储类型,`local` 表示本地磁盘,`minio` 表示 S3 兼容的对象存储服务。
+- `STORAGE_TYPE`:**local**:存储类型,`local` 表示本地磁盘,`minio` 表示 S3,`azureblob` 表示 azure 对象存储。
 - `SERVE_DIRECT`:**false**:允许存储驱动程序重定向到经过身份验证的 URL 以直接提供文件。目前,仅支持通过签名的 URL 提供 Minio/S3,本地不执行任何操作。
 - `MINIO_ENDPOINT`:**localhost:9000**:连接的 Minio 终端点,仅在 `STORAGE_TYPE` 为 `minio` 时可用。
 - `MINIO_ACCESS_KEY_ID`:Minio 的 accessKeyID,仅在 `STORAGE_TYPE` 为 `minio` 时可用。
@@ -1219,6 +1219,11 @@ ALLOW_DATA_URI_IMAGES = true
 - `MINIO_INSECURE_SKIP_VERIFY`:**false**:Minio 跳过 SSL 验证,仅在 `STORAGE_TYPE` 为 `minio` 时可用。
 - `MINIO_BUCKET_LOOKUP_TYPE`: **auto**: Minio的bucket查找方式默认为`auto`模式,可将其设置为`dns`(虚拟托管样式)或`path`(路径样式),仅当`STORAGE_TYPE`为`minio`时可用。
+- `AZURE_BLOB_ENDPOINT`: **_empty_**: Azure Blob 终端点,仅在 `STORAGE_TYPE` 为 `azureblob` 时可用。例如:https://accountname.blob.core.windows.net 或 http://127.0.0.1:10000/devstoreaccount1
+- `AZURE_BLOB_ACCOUNT_NAME`: **_empty_**: Azure Blob 账号名,仅在 `STORAGE_TYPE` 为 `azureblob` 时可用。
+- `AZURE_BLOB_ACCOUNT_KEY`: **_empty_**: Azure Blob 访问密钥,仅在 `STORAGE_TYPE` 为 `azureblob` 时可用。
+- `AZURE_BLOB_CONTAINER`: **gitea**: 用于存储数据的 Azure Blob 容器名,仅在 `STORAGE_TYPE` 为 `azureblob` 时可用。

 建议的 minio 存储配置如下:

 ```ini
go.mod — 3 changed lines
@@ -15,6 +15,8 @@ require (
     gitea.com/lunny/dingtalk_webhook v0.0.0-20171025031554-e3534c89ef96
     gitea.com/lunny/levelqueue v0.4.2-0.20230414023320-3c0159fe0fe4
     github.com/42wim/sshsig v0.0.0-20211121163825-841cf5bbc121
+    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
+    github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2
     github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358
     github.com/ProtonMail/go-crypto v1.0.0
     github.com/PuerkitoBio/goquery v1.9.1
@@ -130,6 +132,7 @@ require (
     dario.cat/mergo v1.0.0 // indirect
     filippo.io/edwards25519 v1.1.0 // indirect
     git.sr.ht/~mariusor/go-xsd-duration v0.0.0-20220703122237-02e73435a078 // indirect
+    github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
     github.com/ClickHouse/ch-go v0.61.5 // indirect
     github.com/ClickHouse/clickhouse-go/v2 v2.22.0 // indirect
     github.com/DataDog/zstd v1.5.5 // indirect
go.sum — 14 changed lines
@@ -38,16 +38,20 @@ github.com/42wim/sshsig v0.0.0-20211121163825-841cf5bbc121 h1:r3qt8PCHnfjOv9PN3H
 github.com/42wim/sshsig v0.0.0-20211121163825-841cf5bbc121/go.mod h1:Ock8XgA7pvULhIaHGAk/cDnRfNrF9Jey81nPcc403iU=
 github.com/6543/go-version v1.3.1 h1:HvOp+Telns7HWJ2Xo/05YXQSB2bE0WmVgbHqwMPZT4U=
 github.com/6543/go-version v1.3.1/go.mod h1:oqFAHCwtLVUTLdhQmVZWYvaHXTdsbB4SY85at64SQEo=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A=
 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1 h1:MyVTgWR8qd/Jw1Le0NZebGBUCLbtak3bJ3z1OlqZBpw=
 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1/go.mod h1:GpPjLhVR9dnUoJMyHWSPy71xY9/lcmpzIPZXmF0FCVY=
 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80=
 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 h1:YUUxeiOWgdAQE3pXt2H7QXzZs0q8UBjgRbl56qo8GYM=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2/go.mod h1:dmXQgZuiSubAecswZE+Sm8jkvEa7kQgTPVRvwL/nd0E=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
 github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
@@ -227,6 +231,8 @@ github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55k
 github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
 github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
 github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
+github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
+github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
 github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY=
 github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
 github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
@@ -5,11 +5,14 @@ package repo

 import (
     "context"
+    "errors"
     "fmt"
     "net/url"
+    "os"
     "path"

     "code.gitea.io/gitea/models/db"
+    "code.gitea.io/gitea/modules/log"
     "code.gitea.io/gitea/modules/setting"
     "code.gitea.io/gitea/modules/storage"
     "code.gitea.io/gitea/modules/timeutil"
@@ -188,8 +191,11 @@ func DeleteAttachments(ctx context.Context, attachments []*Attachment, remove bo
     if remove {
         for i, a := range attachments {
             if err := storage.Attachments.Delete(a.RelativePath()); err != nil {
+                if !errors.Is(err, os.ErrNotExist) {
                     return i, err
                 }
+                log.Warn("Attachment file not found when deleting: %s", a.RelativePath())
+            }
         }
     }
     return int(cnt), nil
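This change works because the storage backends normalize their provider-specific "not found" errors to `os.ErrNotExist` (the new `convertAzureBlobErr` further down maps `bloberror.BlobNotFound` this way, mirroring the minio backend). A minimal standalone sketch of the same pattern, with a hypothetical `deleteBlob` helper standing in for `storage.Attachments.Delete`:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// deleteBlob is a hypothetical stand-in for storage.Attachments.Delete; a real
// backend wraps its provider error so errors.Is can match os.ErrNotExist.
func deleteBlob(path string) error {
	return fmt.Errorf("delete %s: %w", path, os.ErrNotExist)
}

func main() {
	if err := deleteBlob("attachments/a/b/uuid"); err != nil {
		if !errors.Is(err, os.ErrNotExist) {
			panic(err) // a real failure
		}
		fmt.Println("already gone, treat as success")
	}
}
```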
@@ -34,7 +34,7 @@ func (s *ContentStore) Get(key BlobHash256Key) (storage.Object, error) {
 }

 func (s *ContentStore) ShouldServeDirect() bool {
-    return setting.Packages.Storage.MinioConfig.ServeDirect
+    return setting.Packages.Storage.ServeDirect()
 }

 func (s *ContentStore) GetServeDirectURL(key BlobHash256Key, filename string) (*url.URL, error) {
@@ -18,11 +18,14 @@ const (
     LocalStorageType StorageType = "local"
     // MinioStorageType is the type descriptor for minio storage
     MinioStorageType StorageType = "minio"
+    // AzureBlobStorageType is the type descriptor for azure blob storage
+    AzureBlobStorageType StorageType = "azureblob"
 )

 var storageTypes = []StorageType{
     LocalStorageType,
     MinioStorageType,
+    AzureBlobStorageType,
 }

 // IsValidStorageType returns true if the given storage type is valid
@@ -50,25 +53,55 @@ type MinioStorageConfig struct {
     BucketLookUpType string `ini:"MINIO_BUCKET_LOOKUP_TYPE" json:",omitempty"`
 }

+func (cfg *MinioStorageConfig) ToShadow() {
+    if cfg.AccessKeyID != "" {
+        cfg.AccessKeyID = "******"
+    }
+    if cfg.SecretAccessKey != "" {
+        cfg.SecretAccessKey = "******"
+    }
+}
+
+// MinioStorageConfig represents the configuration for a minio storage
+type AzureBlobStorageConfig struct {
+    Endpoint    string `ini:"AZURE_BLOB_ENDPOINT" json:",omitempty"`
+    AccountName string `ini:"AZURE_BLOB_ACCOUNT_NAME" json:",omitempty"`
+    AccountKey  string `ini:"AZURE_BLOB_ACCOUNT_KEY" json:",omitempty"`
+    Container   string `ini:"AZURE_BLOB_CONTAINER" json:",omitempty"`
+    BasePath    string `ini:"AZURE_BLOB_BASE_PATH" json:",omitempty"`
+    ServeDirect bool   `ini:"SERVE_DIRECT"`
+}
+
+func (cfg *AzureBlobStorageConfig) ToShadow() {
+    if cfg.AccountKey != "" {
+        cfg.AccountKey = "******"
+    }
+    if cfg.AccountName != "" {
+        cfg.AccountName = "******"
+    }
+}
+
 // Storage represents configuration of storages
 type Storage struct {
-    Type          StorageType // local or minio
+    Type            StorageType // local or minio or azureblob
     Path            string `json:",omitempty"` // for local type
     TemporaryPath   string `json:",omitempty"`
     MinioConfig     MinioStorageConfig // for minio type
+    AzureBlobConfig AzureBlobStorageConfig // for azureblob type
 }

 func (storage *Storage) ToShadowCopy() Storage {
     shadowStorage := *storage
-    if shadowStorage.MinioConfig.AccessKeyID != "" {
-        shadowStorage.MinioConfig.AccessKeyID = "******"
-    }
-    if shadowStorage.MinioConfig.SecretAccessKey != "" {
-        shadowStorage.MinioConfig.SecretAccessKey = "******"
-    }
+    shadowStorage.MinioConfig.ToShadow()
+    shadowStorage.AzureBlobConfig.ToShadow()
     return shadowStorage
 }

+func (storage *Storage) ServeDirect() bool {
+    return (storage.Type == MinioStorageType && storage.MinioConfig.ServeDirect) ||
+        (storage.Type == AzureBlobStorageType && storage.AzureBlobConfig.ServeDirect)
+}
+
 const storageSectionName = "storage"

 func getDefaultStorageSection(rootCfg ConfigProvider) ConfigSection {
@@ -84,6 +117,10 @@ func getDefaultStorageSection(rootCfg ConfigProvider) ConfigSection {
     storageSec.Key("MINIO_INSECURE_SKIP_VERIFY").MustBool(false)
     storageSec.Key("MINIO_CHECKSUM_ALGORITHM").MustString("default")
     storageSec.Key("MINIO_BUCKET_LOOKUP_TYPE").MustString("auto")
+    storageSec.Key("AZURE_BLOB_ENDPOINT").MustString("")
+    storageSec.Key("AZURE_BLOB_ACCOUNT_NAME").MustString("")
+    storageSec.Key("AZURE_BLOB_ACCOUNT_KEY").MustString("")
+    storageSec.Key("AZURE_BLOB_CONTAINER").MustString("gitea")
     return storageSec
 }

@@ -107,6 +144,8 @@ func getStorage(rootCfg ConfigProvider, name, typ string, sec ConfigSection) (*S
         return getStorageForLocal(targetSec, overrideSec, tp, name)
     case string(MinioStorageType):
         return getStorageForMinio(targetSec, overrideSec, tp, name)
+    case string(AzureBlobStorageType):
+        return getStorageForAzureBlob(targetSec, overrideSec, tp, name)
     default:
         return nil, fmt.Errorf("unsupported storage type %q", targetType)
     }
@@ -247,7 +286,7 @@ func getStorageForLocal(targetSec, overrideSec ConfigSection, tp targetSecType,
     return &storage, nil
 }

-func getStorageForMinio(targetSec, overrideSec ConfigSection, tp targetSecType, name string) (*Storage, error) {
+func getStorageForMinio(targetSec, overrideSec ConfigSection, tp targetSecType, name string) (*Storage, error) { //nolint:dupl
     var storage Storage
     storage.Type = StorageType(targetSec.Key("STORAGE_TYPE").String())
     if err := targetSec.MapTo(&storage.MinioConfig); err != nil {
@@ -275,3 +314,32 @@ func getStorageForMinio(targetSec, overrideSec ConfigSection, tp targetSecType,
     }
     return &storage, nil
 }
+
+func getStorageForAzureBlob(targetSec, overrideSec ConfigSection, tp targetSecType, name string) (*Storage, error) { //nolint:dupl
+    var storage Storage
+    storage.Type = StorageType(targetSec.Key("STORAGE_TYPE").String())
+    if err := targetSec.MapTo(&storage.AzureBlobConfig); err != nil {
+        return nil, fmt.Errorf("map azure blob config failed: %v", err)
+    }
+
+    var defaultPath string
+    if storage.AzureBlobConfig.BasePath != "" {
+        if tp == targetSecIsStorage || tp == targetSecIsDefault {
+            defaultPath = strings.TrimSuffix(storage.AzureBlobConfig.BasePath, "/") + "/" + name + "/"
+        } else {
+            defaultPath = storage.AzureBlobConfig.BasePath
+        }
+    }
+    if defaultPath == "" {
+        defaultPath = name + "/"
+    }
+
+    if overrideSec != nil {
+        storage.AzureBlobConfig.ServeDirect = ConfigSectionKeyBool(overrideSec, "SERVE_DIRECT", storage.AzureBlobConfig.ServeDirect)
+        storage.AzureBlobConfig.BasePath = ConfigSectionKeyString(overrideSec, "AZURE_BLOB_BASE_PATH", defaultPath)
+        storage.AzureBlobConfig.Container = ConfigSectionKeyString(overrideSec, "AZURE_BLOB_CONTAINER", storage.AzureBlobConfig.Container)
+    } else {
+        storage.AzureBlobConfig.BasePath = defaultPath
+    }
+    return &storage, nil
+}
@@ -97,6 +97,44 @@ STORAGE_TYPE = minio
     assert.EqualValues(t, "repo-avatars/", RepoAvatar.Storage.MinioConfig.BasePath)
 }

+func Test_getStorageInheritStorageTypeAzureBlob(t *testing.T) {
+    iniStr := `
+[storage]
+STORAGE_TYPE = azureblob
+`
+    cfg, err := NewConfigProviderFromData(iniStr)
+    assert.NoError(t, err)
+
+    assert.NoError(t, loadPackagesFrom(cfg))
+    assert.EqualValues(t, "azureblob", Packages.Storage.Type)
+    assert.EqualValues(t, "gitea", Packages.Storage.AzureBlobConfig.Container)
+    assert.EqualValues(t, "packages/", Packages.Storage.AzureBlobConfig.BasePath)
+
+    assert.NoError(t, loadRepoArchiveFrom(cfg))
+    assert.EqualValues(t, "azureblob", RepoArchive.Storage.Type)
+    assert.EqualValues(t, "gitea", RepoArchive.Storage.AzureBlobConfig.Container)
+    assert.EqualValues(t, "repo-archive/", RepoArchive.Storage.AzureBlobConfig.BasePath)
+
+    assert.NoError(t, loadActionsFrom(cfg))
+    assert.EqualValues(t, "azureblob", Actions.LogStorage.Type)
+    assert.EqualValues(t, "gitea", Actions.LogStorage.AzureBlobConfig.Container)
+    assert.EqualValues(t, "actions_log/", Actions.LogStorage.AzureBlobConfig.BasePath)
+
+    assert.EqualValues(t, "azureblob", Actions.ArtifactStorage.Type)
+    assert.EqualValues(t, "gitea", Actions.ArtifactStorage.AzureBlobConfig.Container)
+    assert.EqualValues(t, "actions_artifacts/", Actions.ArtifactStorage.AzureBlobConfig.BasePath)
+
+    assert.NoError(t, loadAvatarsFrom(cfg))
+    assert.EqualValues(t, "azureblob", Avatar.Storage.Type)
+    assert.EqualValues(t, "gitea", Avatar.Storage.AzureBlobConfig.Container)
+    assert.EqualValues(t, "avatars/", Avatar.Storage.AzureBlobConfig.BasePath)
+
+    assert.NoError(t, loadRepoAvatarFrom(cfg))
+    assert.EqualValues(t, "azureblob", RepoAvatar.Storage.Type)
+    assert.EqualValues(t, "gitea", RepoAvatar.Storage.AzureBlobConfig.Container)
+    assert.EqualValues(t, "repo-avatars/", RepoAvatar.Storage.AzureBlobConfig.BasePath)
+}
+
 type testLocalStoragePathCase struct {
     loader     func(rootCfg ConfigProvider) error
     storagePtr **Storage
@@ -465,3 +503,77 @@ MINIO_BASE_PATH = /lfs
     assert.EqualValues(t, true, LFS.Storage.MinioConfig.UseSSL)
     assert.EqualValues(t, "/lfs", LFS.Storage.MinioConfig.BasePath)
 }
+
+func Test_getStorageConfiguration29(t *testing.T) {
+    cfg, err := NewConfigProviderFromData(`
+[repo-archive]
+STORAGE_TYPE = azureblob
+AZURE_BLOB_ACCOUNT_NAME = my_account_name
+AZURE_BLOB_ACCOUNT_KEY = my_account_key
+`)
+    assert.NoError(t, err)
+    // assert.Error(t, loadRepoArchiveFrom(cfg))
+    // FIXME: this should return error but now ini package's MapTo() doesn't check type
+    assert.NoError(t, loadRepoArchiveFrom(cfg))
+}
+
+func Test_getStorageConfiguration30(t *testing.T) {
+    cfg, err := NewConfigProviderFromData(`
+[storage.repo-archive]
+STORAGE_TYPE = azureblob
+AZURE_BLOB_ACCOUNT_NAME = my_account_name
+AZURE_BLOB_ACCOUNT_KEY = my_account_key
+`)
+    assert.NoError(t, err)
+    assert.NoError(t, loadRepoArchiveFrom(cfg))
+    assert.EqualValues(t, "my_account_name", RepoArchive.Storage.AzureBlobConfig.AccountName)
+    assert.EqualValues(t, "my_account_key", RepoArchive.Storage.AzureBlobConfig.AccountKey)
+    assert.EqualValues(t, "repo-archive/", RepoArchive.Storage.AzureBlobConfig.BasePath)
+}
+
+func Test_getStorageConfiguration31(t *testing.T) {
+    cfg, err := NewConfigProviderFromData(`
+[storage]
+STORAGE_TYPE = azureblob
+AZURE_BLOB_ACCOUNT_NAME = my_account_name
+AZURE_BLOB_ACCOUNT_KEY = my_account_key
+AZURE_BLOB_BASE_PATH = /prefix
+`)
+    assert.NoError(t, err)
+    assert.NoError(t, loadRepoArchiveFrom(cfg))
+    assert.EqualValues(t, "my_account_name", RepoArchive.Storage.AzureBlobConfig.AccountName)
+    assert.EqualValues(t, "my_account_key", RepoArchive.Storage.AzureBlobConfig.AccountKey)
+    assert.EqualValues(t, "/prefix/repo-archive/", RepoArchive.Storage.AzureBlobConfig.BasePath)
+
+    cfg, err = NewConfigProviderFromData(`
+[storage]
+STORAGE_TYPE = azureblob
+AZURE_BLOB_ACCOUNT_NAME = my_account_name
+AZURE_BLOB_ACCOUNT_KEY = my_account_key
+AZURE_BLOB_BASE_PATH = /prefix
+
+[lfs]
+AZURE_BLOB_BASE_PATH = /lfs
+`)
+    assert.NoError(t, err)
+    assert.NoError(t, loadLFSFrom(cfg))
+    assert.EqualValues(t, "my_account_name", LFS.Storage.AzureBlobConfig.AccountName)
+    assert.EqualValues(t, "my_account_key", LFS.Storage.AzureBlobConfig.AccountKey)
+    assert.EqualValues(t, "/lfs", LFS.Storage.AzureBlobConfig.BasePath)
+
+    cfg, err = NewConfigProviderFromData(`
+[storage]
+STORAGE_TYPE = azureblob
+AZURE_BLOB_ACCOUNT_NAME = my_account_name
+AZURE_BLOB_ACCOUNT_KEY = my_account_key
+AZURE_BLOB_BASE_PATH = /prefix
+
+[storage.lfs]
+AZURE_BLOB_BASE_PATH = /lfs
+`)
+    assert.NoError(t, err)
+    assert.NoError(t, loadLFSFrom(cfg))
+    assert.EqualValues(t, "my_account_name", LFS.Storage.AzureBlobConfig.AccountName)
+    assert.EqualValues(t, "my_account_key", LFS.Storage.AzureBlobConfig.AccountKey)
+    assert.EqualValues(t, "/lfs", LFS.Storage.AzureBlobConfig.BasePath)
+}
@@ -0,0 +1,322 @@ (new file)
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package storage

import (
    "context"
    "errors"
    "fmt"
    "io"
    "net/url"
    "os"
    "path"
    "strings"
    "time"

    "code.gitea.io/gitea/modules/log"
    "code.gitea.io/gitea/modules/setting"
    "code.gitea.io/gitea/modules/util"

    "github.com/Azure/azure-sdk-for-go/sdk/azcore"
    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

var _ Object = &azureBlobObject{}

type azureBlobObject struct {
    blobClient *blob.Client
    Context    context.Context
    Name       string
    Size       int64
    ModTime    *time.Time
    offset     int64
}

func (a *azureBlobObject) Read(p []byte) (int, error) {
    // TODO: improve the performance, we can implement another interface, maybe implement io.WriteTo
    if a.offset >= a.Size {
        return 0, io.EOF
    }
    count := min(int64(len(p)), a.Size-a.offset)

    res, err := a.blobClient.DownloadBuffer(a.Context, p, &blob.DownloadBufferOptions{
        Range: blob.HTTPRange{
            Offset: a.offset,
            Count:  count,
        },
    })
    if err != nil {
        return 0, convertAzureBlobErr(err)
    }
    a.offset += res

    return int(res), nil
}

func (a *azureBlobObject) Close() error {
    a.offset = 0
    return nil
}

func (a *azureBlobObject) Seek(offset int64, whence int) (int64, error) {
    switch whence {
    case io.SeekStart:
    case io.SeekCurrent:
        offset += a.offset
    case io.SeekEnd:
        offset = a.Size - offset
    default:
        return 0, errors.New("Seek: invalid whence")
    }

    if offset > a.Size {
        return 0, errors.New("Seek: invalid offset")
    } else if offset < 0 {
        return 0, errors.New("Seek: invalid offset")
    }
    a.offset = offset
    return a.offset, nil
}

func (a *azureBlobObject) Stat() (os.FileInfo, error) {
    return &azureBlobFileInfo{
        a.Name,
        a.Size,
        *a.ModTime,
    }, nil
}

var _ ObjectStorage = &AzureBlobStorage{}

// AzureStorage returns a azure blob storage
type AzureBlobStorage struct {
    cfg        *setting.AzureBlobStorageConfig
    ctx        context.Context
    credential *azblob.SharedKeyCredential
    client     *azblob.Client
}

func convertAzureBlobErr(err error) error {
    if err == nil {
        return nil
    }

    if bloberror.HasCode(err, bloberror.BlobNotFound) {
        return os.ErrNotExist
    }
    var respErr *azcore.ResponseError
    if !errors.As(err, &respErr) {
        return err
    }
    return fmt.Errorf(respErr.ErrorCode)
}

// NewAzureBlobStorage returns a azure blob storage
func NewAzureBlobStorage(ctx context.Context, cfg *setting.Storage) (ObjectStorage, error) {
    config := cfg.AzureBlobConfig

    log.Info("Creating Azure Blob storage at %s:%s with base path %s", config.Endpoint, config.Container, config.BasePath)

    cred, err := azblob.NewSharedKeyCredential(config.AccountName, config.AccountKey)
    if err != nil {
        return nil, convertAzureBlobErr(err)
    }
    client, err := azblob.NewClientWithSharedKeyCredential(config.Endpoint, cred, &azblob.ClientOptions{})
    if err != nil {
        return nil, convertAzureBlobErr(err)
    }

    _, err = client.CreateContainer(ctx, config.Container, &container.CreateOptions{})
    if err != nil {
        // Check to see if we already own this container (which happens if you run this twice)
        if !bloberror.HasCode(err, bloberror.ContainerAlreadyExists) {
            return nil, convertMinioErr(err)
        }
    }

    return &AzureBlobStorage{
        cfg:        &config,
        ctx:        ctx,
        credential: cred,
        client:     client,
    }, nil
}

func (a *AzureBlobStorage) buildAzureBlobPath(p string) string {
    p = util.PathJoinRelX(a.cfg.BasePath, p)
    if p == "." || p == "/" {
        p = "" // azure uses prefix, so path should be empty as relative path
    }
    return p
}

func (a *AzureBlobStorage) getObjectNameFromPath(path string) string {
    s := strings.Split(path, "/")
    return s[len(s)-1]
}

// Open opens a file
func (a *AzureBlobStorage) Open(path string) (Object, error) {
    blobClient, err := a.getBlobClient(path)
    if err != nil {
        return nil, convertAzureBlobErr(err)
    }
    res, err := blobClient.GetProperties(a.ctx, &blob.GetPropertiesOptions{})
    if err != nil {
        return nil, convertAzureBlobErr(err)
    }
    return &azureBlobObject{
        Context:    a.ctx,
        blobClient: blobClient,
        Name:       a.getObjectNameFromPath(path),
        Size:       *res.ContentLength,
        ModTime:    res.LastModified,
    }, nil
}

// Save saves a file to azure blob storage
func (a *AzureBlobStorage) Save(path string, r io.Reader, size int64) (int64, error) {
    rd := util.NewCountingReader(r)
    _, err := a.client.UploadStream(
        a.ctx,
        a.cfg.Container,
        a.buildAzureBlobPath(path),
        rd,
        // TODO: support set block size and concurrency
        &blockblob.UploadStreamOptions{},
    )
    if err != nil {
        return 0, convertAzureBlobErr(err)
    }
    return int64(rd.Count()), nil
}

type azureBlobFileInfo struct {
    name    string
    size    int64
    modTime time.Time
}

func (a azureBlobFileInfo) Name() string {
    return path.Base(a.name)
}

func (a azureBlobFileInfo) Size() int64 {
    return a.size
}

func (a azureBlobFileInfo) ModTime() time.Time {
    return a.modTime
}

func (a azureBlobFileInfo) IsDir() bool {
    return strings.HasSuffix(a.name, "/")
}

func (a azureBlobFileInfo) Mode() os.FileMode {
    return os.ModePerm
}

func (a azureBlobFileInfo) Sys() any {
    return nil
}

// Stat returns the stat information of the object
func (a *AzureBlobStorage) Stat(path string) (os.FileInfo, error) {
    blobClient, err := a.getBlobClient(path)
    if err != nil {
        return nil, convertAzureBlobErr(err)
    }
    res, err := blobClient.GetProperties(a.ctx, &blob.GetPropertiesOptions{})
    if err != nil {
        return nil, convertAzureBlobErr(err)
    }
    s := strings.Split(path, "/")
    return &azureBlobFileInfo{
        s[len(s)-1],
        *res.ContentLength,
        *res.LastModified,
    }, nil
}

// Delete delete a file
func (a *AzureBlobStorage) Delete(path string) error {
    blobClient, err := a.getBlobClient(path)
    if err != nil {
        return convertAzureBlobErr(err)
    }
    _, err = blobClient.Delete(a.ctx, nil)
    return convertAzureBlobErr(err)
}

// URL gets the redirect URL to a file. The presigned link is valid for 5 minutes.
func (a *AzureBlobStorage) URL(path, name string) (*url.URL, error) {
    blobClient, err := a.getBlobClient(path)
    if err != nil {
        return nil, convertAzureBlobErr(err)
    }

    startTime := time.Now()
    u, err := blobClient.GetSASURL(sas.BlobPermissions{
        Read: true,
    }, time.Now().Add(5*time.Minute), &blob.GetSASURLOptions{
        StartTime: &startTime,
    })
    if err != nil {
        return nil, convertAzureBlobErr(err)
    }

    return url.Parse(u)
}

// IterateObjects iterates across the objects in the azureblobstorage
func (a *AzureBlobStorage) IterateObjects(dirName string, fn func(path string, obj Object) error) error {
    dirName = a.buildAzureBlobPath(dirName)
    if dirName != "" {
        dirName += "/"
    }
    pager := a.client.NewListBlobsFlatPager(a.cfg.Container, &container.ListBlobsFlatOptions{
        Prefix: &dirName,
    })
    for pager.More() {
        resp, err := pager.NextPage(a.ctx)
        if err != nil {
            return convertAzureBlobErr(err)
        }
        for _, object := range resp.Segment.BlobItems {
            blobClient, err := a.getBlobClient(*object.Name)
            if err != nil {
                return convertAzureBlobErr(err)
            }
            object := &azureBlobObject{
                Context:    a.ctx,
                blobClient: blobClient,
                Name:       *object.Name,
                Size:       *object.Properties.ContentLength,
                ModTime:    object.Properties.LastModified,
            }
            if err := func(object *azureBlobObject, fn func(path string, obj Object) error) error {
                defer object.Close()
                return fn(strings.TrimPrefix(object.Name, a.cfg.BasePath), object)
            }(object, fn); err != nil {
                return convertAzureBlobErr(err)
            }
        }
    }
    return nil
}

// Delete delete a file
func (a *AzureBlobStorage) getBlobClient(path string) (*blob.Client, error) {
    return a.client.ServiceClient().NewContainerClient(a.cfg.Container).NewBlobClient(a.buildAzureBlobPath(path)), nil
}

func init() {
    RegisterStorageType(setting.AzureBlobStorageType, NewAzureBlobStorage)
}
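For orientation, here is a minimal, hypothetical usage sketch of this backend through the generic `ObjectStorage` interface (the constructor, `Save`, `Open`, `Stat` and `Delete` all come from the file above; the 127.0.0.1 Azurite endpoint mirrors the app.example.ini example, the account name/key are the well-known Azurite development credentials used by the new test file, and the container name, base path and object path are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"strings"

	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/storage"
)

func main() {
	ctx := context.Background()
	// Point at a local Azurite instance; a real deployment would use
	// https://<account>.blob.core.windows.net and real credentials.
	st, err := storage.NewAzureBlobStorage(ctx, &setting.Storage{
		Type: setting.AzureBlobStorageType,
		AzureBlobConfig: setting.AzureBlobStorageConfig{
			Endpoint:    "http://127.0.0.1:10000/devstoreaccount1",
			AccountName: "devstoreaccount1",
			AccountKey:  "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==",
			Container:   "example",      // placeholder container
			BasePath:    "attachments/", // blobs are stored under this prefix
		},
	})
	if err != nil {
		panic(err)
	}

	// Save streams the reader into a block blob and returns the byte count.
	if _, err := st.Save("a/hello.txt", strings.NewReader("hello"), -1); err != nil {
		panic(err)
	}

	// Open returns an Object backed by ranged downloads.
	obj, err := st.Open("a/hello.txt")
	if err != nil {
		panic(err)
	}
	defer obj.Close()
	data, _ := io.ReadAll(obj)
	fmt.Printf("read %d bytes: %s\n", len(data), data)

	// Stat and Delete behave like their minio/local counterparts.
	if fi, err := st.Stat("a/hello.txt"); err == nil {
		fmt.Println(fi.Name(), fi.Size())
	}
	_ = st.Delete("a/hello.txt")
}
```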
@@ -0,0 +1,56 @@ (new file)
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package storage

import (
    "os"
    "testing"

    "code.gitea.io/gitea/modules/setting"

    "github.com/stretchr/testify/assert"
)

func TestAzureBlobStorageIterator(t *testing.T) {
    if os.Getenv("CI") == "" {
        t.Skip("azureBlobStorage not present outside of CI")
        return
    }
    testStorageIterator(t, setting.AzureBlobStorageType, &setting.Storage{
        AzureBlobConfig: setting.AzureBlobStorageConfig{
            // https://learn.microsoft.com/azure/storage/common/storage-use-azurite?tabs=visual-studio-code#ip-style-url
            Endpoint: "http://devstoreaccount1.azurite.local:10000",
            // https://learn.microsoft.com/azure/storage/common/storage-use-azurite?tabs=visual-studio-code#well-known-storage-account-and-key
            AccountName: "devstoreaccount1",
            AccountKey:  "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==",
            Container:   "test",
        },
    })
}

func TestAzureBlobStoragePath(t *testing.T) {
    m := &AzureBlobStorage{cfg: &setting.AzureBlobStorageConfig{BasePath: ""}}
    assert.Equal(t, "", m.buildAzureBlobPath("/"))
    assert.Equal(t, "", m.buildAzureBlobPath("."))
    assert.Equal(t, "a", m.buildAzureBlobPath("/a"))
    assert.Equal(t, "a/b", m.buildAzureBlobPath("/a/b/"))

    m = &AzureBlobStorage{cfg: &setting.AzureBlobStorageConfig{BasePath: "/"}}
    assert.Equal(t, "", m.buildAzureBlobPath("/"))
    assert.Equal(t, "", m.buildAzureBlobPath("."))
    assert.Equal(t, "a", m.buildAzureBlobPath("/a"))
    assert.Equal(t, "a/b", m.buildAzureBlobPath("/a/b/"))

    m = &AzureBlobStorage{cfg: &setting.AzureBlobStorageConfig{BasePath: "/base"}}
    assert.Equal(t, "base", m.buildAzureBlobPath("/"))
    assert.Equal(t, "base", m.buildAzureBlobPath("."))
    assert.Equal(t, "base/a", m.buildAzureBlobPath("/a"))
    assert.Equal(t, "base/a/b", m.buildAzureBlobPath("/a/b/"))

    m = &AzureBlobStorage{cfg: &setting.AzureBlobStorageConfig{BasePath: "/base/"}}
    assert.Equal(t, "base", m.buildAzureBlobPath("/"))
    assert.Equal(t, "base", m.buildAzureBlobPath("."))
    assert.Equal(t, "base/a", m.buildAzureBlobPath("/a"))
    assert.Equal(t, "base/a/b", m.buildAzureBlobPath("/a/b/"))
}
@@ -23,7 +23,7 @@ func TestMinioStorageIterator(t *testing.T) {
     }
     testStorageIterator(t, setting.MinioStorageType, &setting.Storage{
         MinioConfig: setting.MinioStorageConfig{
-            Endpoint:        "127.0.0.1:9000",
+            Endpoint:        "minio:9000",
             AccessKeyID:     "123456",
             SecretAccessKey: "12345678",
             Bucket:          "gitea",
@ -35,6 +35,7 @@ func testStorageIterator(t *testing.T, typStr Type, cfg *setting.Storage) {
        "b":           {"b/1.txt", "b/2.txt", "b/3.txt", "b/x 4.txt"},
        "":            {"a/1.txt", "b/1.txt", "b/2.txt", "b/3.txt", "b/x 4.txt", "ab/1.txt"},
        "/":           {"a/1.txt", "b/1.txt", "b/2.txt", "b/3.txt", "b/x 4.txt", "ab/1.txt"},
        ".":           {"a/1.txt", "b/1.txt", "b/2.txt", "b/3.txt", "b/x 4.txt", "ab/1.txt"},
        "a/b/../../a": {"a/1.txt"},
    }
    for dir, expected := range expectedList {
@ -76,3 +76,24 @@ func IsEmptyReader(r io.Reader) (err error) {
        }
    }
}

type CountingReader struct {
    io.Reader
    n int
}

var _ io.Reader = &CountingReader{}

func (w *CountingReader) Count() int {
    return w.n
}

func (w *CountingReader) Read(p []byte) (int, error) {
    n, err := w.Reader.Read(p)
    w.n += n
    return n, err
}

func NewCountingReader(rd io.Reader) *CountingReader {
    return &CountingReader{Reader: rd}
}
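The new CountingReader is a plain io.Reader wrapper that records how many bytes pass through it, which is useful when a written size has to be reported for a reader of unknown length. A self-contained usage sketch (the type is copied here only so the example runs on its own):

package main

import (
    "fmt"
    "io"
    "strings"
)

type CountingReader struct {
    io.Reader
    n int
}

func (w *CountingReader) Read(p []byte) (int, error) {
    n, err := w.Reader.Read(p)
    w.n += n
    return n, err
}

func (w *CountingReader) Count() int { return w.n }

func main() {
    cr := &CountingReader{Reader: strings.NewReader("hello azure blob")}
    copied, _ := io.Copy(io.Discard, cr)
    // Both numbers report 16: the consumer's count and the reader's count agree.
    fmt.Println(copied, cr.Count())
}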
@ -428,7 +428,7 @@ func (ar artifactRoutes) getDownloadArtifactURL(ctx *ArtifactContext) {
    var items []downloadArtifactResponseItem
    for _, artifact := range artifacts {
        var downloadURL string
        if setting.Actions.ArtifactStorage.MinioConfig.ServeDirect {
        if setting.Actions.ArtifactStorage.ServeDirect() {
            u, err := ar.fs.URL(artifact.StoragePath, artifact.ArtifactName)
            if err != nil && !errors.Is(err, storage.ErrURLNotSupported) {
                log.Error("Error getting serve direct url: %v", err)
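This hunk and the ones that follow replace direct reads of MinioConfig.ServeDirect with a new ServeDirect() method on setting.Storage, so the serve-direct decision no longer assumes MinIO is the only object storage backend. The method's definition is not part of the hunks shown here; the sketch below is only an assumption about its shape, namely that it picks the per-backend flag matching the configured storage type:

package setting // hypothetical sketch, not the file changed by this PR

type Type string

const (
    MinioStorageType     Type = "minio"
    AzureBlobStorageType Type = "azureblob"
)

type MinioStorageConfig struct{ ServeDirect bool }

type AzureBlobStorageConfig struct{ ServeDirect bool }

type Storage struct {
    Type            Type
    MinioConfig     MinioStorageConfig
    AzureBlobConfig AzureBlobStorageConfig
}

// ServeDirect reports whether the active backend should hand out pre-signed
// URLs instead of proxying the file through Gitea (assumed behaviour).
func (s *Storage) ServeDirect() bool {
    switch s.Type {
    case MinioStorageType:
        return s.MinioConfig.ServeDirect
    case AzureBlobStorageType:
        return s.AzureBlobConfig.ServeDirect
    default:
        return false
    }
}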
@ -55,7 +55,7 @@ func saveUploadChunkBase(st storage.ObjectStorage, ctx *ArtifactContext,
        }
    }
    if writtenSize != contentSize {
        checkErr = errors.Join(checkErr, fmt.Errorf("contentSize not match body size"))
        checkErr = errors.Join(checkErr, fmt.Errorf("writtenSize %d not match contentSize %d", writtenSize, contentSize))
    }
    if checkErr != nil {
        if err := st.Delete(storagePath); err != nil {
@ -448,7 +448,7 @@ func (r *artifactV4Routes) getSignedArtifactURL(ctx *ArtifactContext) {

    respData := GetSignedArtifactURLResponse{}

    if setting.Actions.ArtifactStorage.MinioConfig.ServeDirect {
    if setting.Actions.ArtifactStorage.ServeDirect() {
        u, err := storage.ActionsArtifacts.URL(artifact.StoragePath, artifact.ArtifactPath)
        if u != nil && err == nil {
            respData.SignedUrl = u.String()
@ -201,7 +201,7 @@ func GetRawFileOrLFS(ctx *context.APIContext) {
        return
    }

    if setting.LFS.Storage.MinioConfig.ServeDirect {
    if setting.LFS.Storage.ServeDirect() {
        // If we have a signed url (S3, object storage), redirect to this directly.
        u, err := storage.LFS.URL(pointer.RelativePath(), blob.Name())
        if u != nil && err == nil {
@ -326,7 +326,7 @@ func download(ctx *context.APIContext, archiveName string, archiver *repo_model.
        archiver.CommitID, archiver.CommitID))

    rPath := archiver.RelativePath()
    if setting.RepoArchive.Storage.MinioConfig.ServeDirect {
    if setting.RepoArchive.Storage.ServeDirect() {
        // If we have a signed url (S3, object storage), redirect to this directly.
        u, err := storage.RepoArchives.URL(rPath, downloadName)
        if u != nil && err == nil {
@ -23,7 +23,7 @@ func storageHandler(storageSetting *setting.Storage, prefix string, objStore sto
    prefix = strings.Trim(prefix, "/")
    funcInfo := routing.GetFuncInfo(storageHandler, prefix)

    if storageSetting.MinioConfig.ServeDirect {
    if storageSetting.ServeDirect() {
        return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
            if req.Method != "GET" && req.Method != "HEAD" {
                http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
@ -625,7 +625,7 @@ func ArtifactsDownloadView(ctx *context_module.Context) {
    // The v4 backend enshures ContentEncoding is set to "application/zip", which is not the case for the old backend
    if len(artifacts) == 1 && artifacts[0].ArtifactName+".zip" == artifacts[0].ArtifactPath && artifacts[0].ContentEncoding == "application/zip" {
        art := artifacts[0]
        if setting.Actions.ArtifactStorage.MinioConfig.ServeDirect {
        if setting.Actions.ArtifactStorage.ServeDirect() {
            u, err := storage.ActionsArtifacts.URL(art.StoragePath, art.ArtifactPath)
            if u != nil && err == nil {
                ctx.Redirect(u.String())
@ -127,7 +127,7 @@ func ServeAttachment(ctx *context.Context, uuid string) {
        return
    }

    if setting.Attachment.Storage.MinioConfig.ServeDirect {
    if setting.Attachment.Storage.ServeDirect() {
        // If we have a signed url (S3, object storage), redirect to this directly.
        u, err := storage.Attachments.URL(attach.RelativePath(), attach.Name)
@ -53,8 +53,8 @@ func ServeBlobOrLFS(ctx *context.Context, blob *git.Blob, lastModified *time.Tim
        return nil
    }

    if setting.LFS.Storage.MinioConfig.ServeDirect {
    if setting.LFS.Storage.ServeDirect() {
        // If we have a signed url (S3, object storage), redirect to this directly.
        // If we have a signed url (S3, object storage, blob storage), redirect to this directly.
        u, err := storage.LFS.URL(pointer.RelativePath(), blob.Name())
        if u != nil && err == nil {
            ctx.Redirect(u.String())
@ -491,7 +491,7 @@ func download(ctx *context.Context, archiveName string, archiver *repo_model.Rep
        archiver.CommitID, archiver.CommitID))

    rPath := archiver.RelativePath()
    if setting.RepoArchive.Storage.MinioConfig.ServeDirect {
    if setting.RepoArchive.Storage.ServeDirect() {
        // If we have a signed url (S3, object storage), redirect to this directly.
        u, err := storage.RepoArchives.URL(rPath, downloadName)
        if u != nil && err == nil {
@ -453,7 +453,7 @@ func buildObjectResponse(rc *requestContext, pointer lfs_module.Pointer, downloa

    if download {
        var link *lfs_module.Link
        if setting.LFS.Storage.MinioConfig.ServeDirect {
        if setting.LFS.Storage.ServeDirect() {
            // If we have a signed url (S3, object storage), redirect to this directly.
            u, err := storage.LFS.URL(pointer.RelativePath(), pointer.Oid)
            if u != nil && err == nil {
@ -144,11 +144,12 @@ func TestPackageGeneric(t *testing.T) {
    t.Run("ServeDirect", func(t *testing.T) {
        defer tests.PrintCurrentTest(t)()

        if setting.Packages.Storage.Type != setting.MinioStorageType {
        if setting.Packages.Storage.Type != setting.MinioStorageType && setting.Packages.Storage.Type != setting.AzureBlobStorageType {
            t.Skip("Test skipped for non-Minio-storage.")
            t.Skip("Test skipped for non-Minio-storage and non-AzureBlob-storage.")
            return
        }

        if setting.Packages.Storage.Type == setting.MinioStorageType {
            if !setting.Packages.Storage.MinioConfig.ServeDirect {
                old := setting.Packages.Storage.MinioConfig.ServeDirect
                defer func() {

@ -157,6 +158,16 @@ func TestPackageGeneric(t *testing.T) {
                setting.Packages.Storage.MinioConfig.ServeDirect = true
            }
        } else if setting.Packages.Storage.Type == setting.AzureBlobStorageType {
            if !setting.Packages.Storage.AzureBlobConfig.ServeDirect {
                old := setting.Packages.Storage.AzureBlobConfig.ServeDirect
                defer func() {
                    setting.Packages.Storage.AzureBlobConfig.ServeDirect = old
                }()

                setting.Packages.Storage.AzureBlobConfig.ServeDirect = true
            }
        }

        req := NewRequest(t, "GET", url+"/"+filename)
        resp := MakeRequest(t, req, http.StatusSeeOther)

@ -168,7 +179,7 @@ func TestPackageGeneric(t *testing.T) {

        resp2, err := (&http.Client{}).Get(location)
        assert.NoError(t, err)
        assert.Equal(t, http.StatusOK, resp2.StatusCode)
        assert.Equal(t, http.StatusOK, resp2.StatusCode, location)

        body, err := io.ReadAll(resp2.Body)
        assert.NoError(t, err)
@ -53,9 +53,6 @@ APP_DATA_PATH = tests/{{TEST_TYPE}}/gitea-{{TEST_TYPE}}-mssql/data
BUILTIN_SSH_SERVER_USER = git
SSH_TRUSTED_USER_CA_KEYS = ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCb4DC1dMFnJ6pXWo7GMxTchtzmJHYzfN6sZ9FAPFR4ijMLfGki+olvOMO5Fql1/yGnGfbELQa1S6y4shSvj/5K+zUFScmEXYf3Gcr87RqilLkyk16RS+cHNB1u87xTHbETaa3nyCJeGQRpd4IQ4NKob745mwDZ7jQBH8AZEng50Oh8y8fi8skBBBzaYp1ilgvzG740L7uex6fHV62myq0SXeCa+oJUjq326FU8y+Vsa32H8A3e7tOgXZPdt2TVNltx2S9H2WO8RMi7LfaSwARNfy1zu+bfR50r6ef8Yx5YKCMz4wWb1SHU1GS800mjOjlInLQORYRNMlSwR1+vLlVDciOqFapDSbj+YOVOawR0R1aqlSKpZkt33DuOBPx9qe6CVnIi7Z+Px/KqM+OLCzlLY/RS+LbxQpDWcfTVRiP+S5qRTcE3M3UioN/e0BE/1+MpX90IGpvVkA63ILYbKEa4bM3ASL7ChTCr6xN5XT+GpVJveFKK1cfNx9ExHI4rzYE=

[attachment]
PATH = tests/{{TEST_TYPE}}/gitea-{{TEST_TYPE}}-mssql/data/attachments

[mailer]
ENABLED = true
PROTOCOL = dummy

@ -102,8 +99,13 @@ SECRET_KEY = 9pCviYTWSb
INTERNAL_TOKEN = eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYmYiOjE0OTU1NTE2MTh9.hhSVGOANkaKk3vfCd2jDOIww4pUk0xtg9JRde5UogyQ
DISABLE_QUERY_AUTH_TOKEN = true

[lfs]
[storage]
PATH = tests/{{TEST_TYPE}}/gitea-{{TEST_TYPE}}-mssql/data/lfs
STORAGE_TYPE = azureblob
AZURE_BLOB_ENDPOINT = http://devstoreaccount1.azurite.local:10000
AZURE_BLOB_ACCOUNT_NAME = devstoreaccount1
AZURE_BLOB_ACCOUNT_KEY = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
AZURE_BLOB_CONTAINER = gitea
SERVE_DIRECT = false

[packages]
ENABLED = true
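The [storage] section above points the MSSQL integration run at Azurite with the emulator's well-known account and key. Outside of Gitea, reaching the same emulator with the official github.com/Azure/azure-sdk-for-go/sdk/storage/azblob client could look roughly like this; it is only a sketch using Azurite's documented IP-style URL, and the container and blob names are made up:

package main

import (
    "context"
    "log"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

func main() {
    // Azurite's well-known development account and key (public test values).
    const (
        accountName = "devstoreaccount1"
        accountKey  = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
        serviceURL  = "http://127.0.0.1:10000/" + accountName // IP-style URL
    )

    cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
    if err != nil {
        log.Fatal(err)
    }
    client, err := azblob.NewClientWithSharedKeyCredential(serviceURL, cred, nil)
    if err != nil {
        log.Fatal(err)
    }

    ctx := context.Background()
    if _, err := client.CreateContainer(ctx, "gitea", nil); err != nil {
        log.Printf("create container: %v (it may already exist)", err)
    }
    if _, err := client.UploadBuffer(ctx, "gitea", "hello.txt", []byte("hello"), nil); err != nil {
        log.Fatal(err)
    }
    log.Println("uploaded hello.txt to Azurite")
}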
@ -54,9 +54,6 @@ APP_DATA_PATH = tests/{{TEST_TYPE}}/gitea-{{TEST_TYPE}}-pgsql/data
BUILTIN_SSH_SERVER_USER = git
SSH_TRUSTED_USER_CA_KEYS = ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCb4DC1dMFnJ6pXWo7GMxTchtzmJHYzfN6sZ9FAPFR4ijMLfGki+olvOMO5Fql1/yGnGfbELQa1S6y4shSvj/5K+zUFScmEXYf3Gcr87RqilLkyk16RS+cHNB1u87xTHbETaa3nyCJeGQRpd4IQ4NKob745mwDZ7jQBH8AZEng50Oh8y8fi8skBBBzaYp1ilgvzG740L7uex6fHV62myq0SXeCa+oJUjq326FU8y+Vsa32H8A3e7tOgXZPdt2TVNltx2S9H2WO8RMi7LfaSwARNfy1zu+bfR50r6ef8Yx5YKCMz4wWb1SHU1GS800mjOjlInLQORYRNMlSwR1+vLlVDciOqFapDSbj+YOVOawR0R1aqlSKpZkt33DuOBPx9qe6CVnIi7Z+Px/KqM+OLCzlLY/RS+LbxQpDWcfTVRiP+S5qRTcE3M3UioN/e0BE/1+MpX90IGpvVkA63ILYbKEa4bM3ASL7ChTCr6xN5XT+GpVJveFKK1cfNx9ExHI4rzYE=

[attachment]
PATH = tests/{{TEST_TYPE}}/gitea-{{TEST_TYPE}}-pgsql/data/attachments

[mailer]
ENABLED = true
PROTOCOL = dummy