Mirror of https://github.com/dani-garcia/vaultwarden.git
Commit e6c3371e6b by Chase Douglas, 2025-03-17 16:29:55 +01:00 (committed by GitHub)
36 changed files with 3167 additions and 186 deletions

.github/workflows/lambda.yml (new file)

@ -0,0 +1,49 @@
name: Build Lambda Package
on: workflow_dispatch
jobs:
build:
runs-on: ubuntu-24.04-arm
container:
image: public.ecr.aws/codebuild/amazonlinux2-aarch64-standard:3.0
steps:
- name: Checkout source code
uses: actions/checkout@v4
- name: Install development packages
run: sudo yum install -y krb5-devel openldap-devel
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable
- name: Install Cargo Lambda
uses: jaxxstorm/action-install-gh-release@v1.9.0
with:
repo: cargo-lambda/cargo-lambda
platform: linux
arch: aarch64
- name: Setup rust Cache
uses: Swatinem/rust-cache@v2
- name: Build with Cargo
run: cargo lambda build --verbose
- name: Copy libpq and its dependencies
run: cp /lib64/{libcrypt.so.2,liblber-2.4.so.2,libldap_r-2.4.so.2,libpq.so.5,libsasl2.so.3} target/lambda/vaultwarden/
# This ensures the web-vault startup checks pass; the web-vault itself is
# instead served statically from an S3 bucket
- name: Create placeholder web-vault/index.html
run: |-
mkdir target/lambda/vaultwarden/web-vault
echo "<html><body><h1>Web Vault Placeholder</h1></body></html>" > target/lambda/vaultwarden/web-vault/index.html
- name: Archive function package
uses: actions/upload-artifact@v4
with:
name: vaultwarden-lambda
path: target/lambda/vaultwarden/*
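Since the workflow is only triggered via `workflow_dispatch`, it has to be started manually. A minimal sketch of doing that from the GitHub CLI, assuming `gh` is authenticated against the fork hosting this workflow:

```sh
# Kick off the Lambda package build on the default branch.
gh workflow run lambda.yml

# Find the run ID, then download the vaultwarden-lambda artifact once the run finishes.
gh run list --workflow=lambda.yml --limit 1
gh run download <run-id> --name vaultwarden-lambda   # <run-id> comes from the listing above
```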

Cargo.lock (generated; diff suppressed because it is too large)

Cargo.toml

@ -20,7 +20,11 @@ build = "build.rs"
enable_syslog = []
mysql = ["diesel/mysql", "diesel_migrations/mysql"]
postgresql = ["diesel/postgres", "diesel_migrations/postgres"]
aws = ["dsql", "s3", "ses"]
dsql = ["postgresql", "dep:aws-config", "dep:aws-sdk-dsql"]
sqlite = ["diesel/sqlite", "diesel_migrations/sqlite", "dep:libsqlite3-sys"]
s3 = ["dep:aws-config", "dep:aws-sdk-s3"]
ses = ["dep:aws-config", "dep:aws-sdk-sesv2"]
# Enable to use a vendored and statically linked openssl
vendored_openssl = ["openssl/vendored"]
# Enable MiMalloc memory allocator to replace the default malloc
@ -88,6 +92,12 @@ diesel-derive-newtype = "2.1.2"
# Bundled/Static SQLite
libsqlite3-sys = { version = "0.31.0", features = ["bundled"], optional = true }
# AWS Services
aws-config = { version = "1.5.12", features = ["behavior-version-latest"], optional = true }
aws-sdk-s3 = { version = "1.72.0", features = ["behavior-version-latest"], optional = true }
aws-sdk-dsql = { version = "1.2.0", features = ["behavior-version-latest"], optional = true }
aws-sdk-sesv2 = { version = "1.65.0", features = ["behavior-version-latest"], optional = true }
# Crypto-related libraries
rand = "0.9.0"
ring = "0.17.13"

CargoLambda.toml (new file)

@ -0,0 +1,7 @@
[build]
features = ["aws"]
release = true
arm64 = true
[build.compiler]
type = "cargo"

aws/.gitignore (new file)

@ -0,0 +1,3 @@
.aws-sam
vaultwarden-lambda.zip
web-vault

aws/README.md (new file)

@ -0,0 +1,53 @@
# AWS Serverless Deployment Instructions
## Architecture
```
CloudFront CDN
├─ API Lambda Function
│ ├─ Data S3 Bucket
│ ├─ Aurora DSQL Database
│ └─ Amazon Simple Email Service (SES)
└─ Web-vault static assets S3 Bucket
```
## A Note On AWS Accounts and Security
It is common for a single AWS account to host multiple services, but separating workloads into their own accounts is easy and costs nothing extra. Doing so makes it easier to contain security concerns and monitor costs. For example, AWS Identity and Access Management (IAM) enforces stricter controls on cross-account access than on within-account access, making it harder for an attacker to hop from workload to workload when they live in separate accounts.
Given the confidential nature of data stored in Vaultwarden, it is *highly* recommended that you create a new, separate AWS account just for Vaultwarden. If you only have one account, investigate creating an [AWS Organization](https://aws.amazon.com/organizations/) to make it easy to create a second account tied to the same billing and account management mechanism, and investigate creating an [AWS IAM Identity Center](https://aws.amazon.com/iam/identity-center/) instance for easy SSO access across your accounts.
## Initial Deployment
1. Create an AWS account
1. Install the AWS CLI
1. Install AWS SAM CLI
1. Download the vaultwarden-lambda.zip Lambda Function code package to this directory (e.g. the proof-of-concept GitHub Actions artifact from run https://github.com/txase/vaultwarden/actions/runs/13315966383)
1. Pick a region that supports DSQL to deploy the Vaultwarden application into (during the DSQL Preview this must be us-east-1 or us-east-2)
1. Create an Aurora DSQL Cluster in the region using the AWS Console (this will be automated when CloudFormation ships DSQL support at GA)
1. Set up local AWS configuration to access the account and region from the CLI
1. Copy DSQL Cluster ID
1. Run `./deploy.sh` in this directory
* Most parameters can be skipped at first, but you must provide the `DSQLClusterID` parameter value.
1. Note the "Output" values from the deploy command
* These can also be retrieved later by running `sam list stack-outputs`
1. Download the latest [web-vault build](https://github.com/dani-garcia/bw_web_builds/releases) and extract it
1. Sync the web-vault build contents into the WebVaultAssetsBucket:
* Inside the web-vault build folder run `aws s3 sync . s3://<WebVaultAssetsBucket>`, where `WebVaultAssetsBucket` is a stack output value
1. You can now navigate to your instance at the location of your `CDNDomain` stack output value (a condensed command sketch of the deploy and sync steps follows this list)
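Condensed, the deploy-and-sync portion of these steps looks roughly like the following. This is a minimal sketch, run from the `aws/` directory (so samconfig.toml supplies the stack name) and from inside the extracted web-vault folder, with `<WebVaultAssetsBucket>` taken from the stack outputs:

```sh
# Build and deploy the SAM stack; the guided prompts ask for parameters,
# including the required DSQLClusterID.
./deploy.sh

# Show the stack outputs (WebVaultAssetsBucket, CDNDomain, ...).
sam list stack-outputs

# From inside the extracted web-vault build folder, upload the static assets.
aws s3 sync . s3://<WebVaultAssetsBucket>
```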
## Custom Domain
1. Create an AWS Certificate Manager (ACM) Certificate for your domain **in the us-east-1 region**
* There are many tutorials and/or automated ways to do this, including following the official docs [here](https://docs.aws.amazon.com/acm/latest/userguide/acm-public-certificates.html); a CLI sketch also follows this list
* It must be in the us-east-1 region because CloudFront only supports certificates from us-east-1
* Use key algorithm RSA 2048
* Continue to the next step once the certificate is in the *Issued* state
* Note the certificate's ARN
1. Run `./deploy.sh` again and add the following parameter values:
* **Domain**: `https://<custom domain>`
* **ACMCertificateArn**: The ARN of the certificate you created for the domain
1. Create a CNAME record for the custom domain set to the value of the CDNDomain stack output
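For reference, requesting the certificate from the CLI might look like this. A minimal sketch, with `vault.example.com` standing in for your custom domain:

```sh
# Request a public certificate in us-east-1 (required by CloudFront),
# using DNS validation and the RSA 2048 key algorithm.
aws acm request-certificate \
  --region us-east-1 \
  --domain-name vault.example.com \
  --validation-method DNS \
  --key-algorithm RSA_2048

# Look up the DNS validation record to create, then wait for the Issued state
# and note the certificate ARN for the ACMCertificateArn parameter.
aws acm describe-certificate --region us-east-1 --certificate-arn <certificate-arn>
```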
## Email via AWS Simple Email Service (SES)
Email is complicated. These instructions will not attempt to walk you through setting up SES identities for sending email. You may find docs and guides online for how to do this.
In order for Vaultwarden to send emails using SES you must have an SES Email Address Identity that **does not have a default configuration set**. An identity with a default configuration set breaks the IAM permission model set up for the Vaultwarden API Function.
Once you have an SES Identity for the sending email address, run `./deploy.sh` again and provide the email address in the `SMTP_FROM` parameter.
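As a starting point, creating a plain email address identity from the CLI might look like this. A minimal sketch, with `vaultwarden@example.com` as a placeholder sending address:

```sh
# Create an SES email address identity; SES sends a verification mail to the address.
aws sesv2 create-email-identity --email-address vaultwarden@example.com

# Confirm the identity is verified and has no default configuration set,
# then re-run ./deploy.sh with SMTP_FROM set to this address.
aws sesv2 get-email-identity --email-address vaultwarden@example.com
```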

aws/deploy.sh (new file, executable)

@ -0,0 +1,9 @@
#!/bin/sh -e
echo 'Building template...'
sam build
echo ''
sam deploy --guided

aws/samconfig.toml (new file)

@ -0,0 +1,12 @@
version = 0.1
[default.global.parameters]
stack_name = "vaultwarden"
[default.deploy.parameters]
resolve_s3 = true
s3_prefix = "vaultwarden"
confirm_changeset = true
capabilities = "CAPABILITY_IAM"
image_repositories = []
disable_rollback = true

aws/template.yaml (new file)

@ -0,0 +1,582 @@
AWSTemplateFormatVersion: '2010-09-09'
Description: AWS CloudFormation template for running VaultWarden on AWS serverless services.
Parameters:
Domain:
Type: String
Description: >-
The domain name for the Vaultwarden instance (e.g. https://example.com). If this parameter or the ACMCertificateArn
parameter are left empty, the Vaultwarden instance can still be reached at the output CDN domain
(e.g. https://xxxxxxxx.cloudfront.net).
AllowedPattern: (https://[a-z0-9.-]+|)
Default: ''
ACMCertificateArn:
Type: String
Description: The ARN of a us-east-1 ACM certificate to use for the domain. Required if the `Domain` parameter is set.
AllowedPattern: (arn:aws:acm:us-east-1:[0-9]+:certificate/[0-9a-f-]+|)
Default: ''
DSQLClusterId:
Type: String
Description: The ID of the Aurora DSQL cluster.
AllowedPattern: '[a-z0-9]+'
APILogRetention:
Type: Number
Description: The number of days to retain the API logs. -1 means to never expire.
Default: -1
AllowedValues: [-1, 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1096, 1827, 2192, 2557, 2922, 3288, 3653]
SignupsAllowed:
Type: String
Description: Controls if new users can register
Default: 'true'
AllowedValues: ['true', 'false']
IconService:
Type: String
Description: Allowed icon service sources.
Default: bitwarden
AdminToken:
Type: String
Description: Token for the admin interface, preferably an Argon2 PHC string. If empty, the admin interface will be disabled.
Default: ''
SMTPFrom:
Type: String
Description: The email address to send emails from. Email service is disabled if this value is empty.
Default: ''
SMTPFromName:
Type: String
Description: The name to send emails from.
Default: Vaultwarden
Mappings:
IconSource:
internal:
CSP: ''
bitwarden:
CSP: https://icons.bitwarden.net/
duckduckgo:
CSP: https://icons.duckduckgo.com/ip3/
google:
CSP: https://www.google.com/s2/favicons https://*.gstatic.com/favicon
Conditions:
IsDomainAndCertificateSet: !And
- !Not [!Equals [!Ref Domain, '']]
- !Not [!Equals [!Ref ACMCertificateArn, '']]
IsApiLogRetentionNeverExpire: !Equals
- !Ref APILogRetention
- -1
IconSourceIsPredefined: !Or
- !Equals [!Ref IconService, internal]
- !Equals [!Ref IconService, bitwarden]
- !Equals [!Ref IconService, duckduckgo]
- !Equals [!Ref IconService, google]
IsAdminTokenEmpty: !Equals
- !Ref AdminToken
- ''
IsEmailEnabled: !Not
- !Equals
- !Ref SMTPFrom
- ''
Resources:
DataBucket:
Type: AWS::S3::Bucket
Properties:
BucketEncryption:
ServerSideEncryptionConfiguration:
- BucketKeyEnabled: true
ServerSideEncryptionByDefault:
SSEAlgorithm: aws:kms
BucketName: !Sub ${AWS::StackName}-${AWS::AccountId}-${AWS::Region}-data
CorsConfiguration:
CorsRules:
- AllowedMethods:
- GET
- HEAD
AllowedOrigins:
- '*'
LifecycleConfiguration:
Rules:
- AbortIncompleteMultipartUpload:
DaysAfterInitiation: 2
ExpiredObjectDeleteMarker: true
NoncurrentVersionExpiration:
NoncurrentDays: 30
Status: Enabled
PublicAccessBlockConfiguration:
BlockPublicAcls: true
BlockPublicPolicy: true
IgnorePublicAcls: true
RestrictPublicBuckets: true
VersioningConfiguration:
Status: Enabled
DataBucketEnforceEncryptionAndStorageTier:
Type: AWS::S3::BucketPolicy
Properties:
Bucket: !Ref DataBucket
PolicyDocument:
Version: '2012-10-17'
Statement:
- Sid: DenyUnencryptedObjectUploads
Effect: Deny
Principal: '*'
Action: s3:PutObject
Resource: !Sub arn:${AWS::Partition}:s3:::${DataBucket}/*
Condition:
'Null':
s3:x-amz-server-side-encryption-aws-kms-key-id: true
- Sid: DenyUnencryptedTransit
Effect: Deny
Principal: '*'
Action: s3:*
Resource:
- !Sub arn:${AWS::Partition}:s3:::${DataBucket}
- !Sub arn:${AWS::Partition}:s3:::${DataBucket}/*
Condition:
Bool:
aws:SecureTransport: false
- Sid: DenyNonIntelligentTieringStorageClass
Effect: Deny
Principal: '*'
Action: s3:PutObject
Resource: !Sub arn:aws:s3:::${DataBucket}/*
Condition:
StringNotEquals:
s3:x-amz-storage-class: INTELLIGENT_TIERING
ApiFunctionRole:
Type: AWS::IAM::Role
Properties:
AssumeRolePolicyDocument:
Version: '2012-10-17'
Statement:
- Action: sts:AssumeRole
Effect: Allow
Principal:
Service: lambda.amazonaws.com
ManagedPolicyArns:
- arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
Policies:
- PolicyName: AccessAWSServices
PolicyDocument:
Version: '2012-10-17'
Statement:
- Effect: Allow
Action:
- s3:GetObject
- s3:ListBucket
- s3:PutObject
- s3:DeleteObject
Resource:
- !Sub arn:${AWS::Partition}:s3:::${DataBucket}
- !Sub arn:${AWS::Partition}:s3:::${DataBucket}/*
- Effect: Allow
Action: dsql:DbConnectAdmin
Resource: !Sub arn:${AWS::Partition}:dsql:${AWS::Region}:${AWS::AccountId}:cluster/${DSQLClusterId}
- !If
- IsEmailEnabled
- Effect: Allow
Action: ses:SendRawEmail
Resource: '*'
Condition:
StringEquals:
ses:FromAddress: !Ref SMTPFrom
ses:FromDisplayName: !Ref SMTPFromName
- !Ref AWS::NoValue
ApiFunction:
Type: AWS::Lambda::Function
Properties:
Architectures:
- arm64
Code: ./vaultwarden-lambda.zip
Environment:
Variables:
AWS_LWA_PORT: 8000
AWS_LWA_READINESS_CHECK_PATH: /alive
AWS_LWA_ASYNC_INIT: true
AWS_LWA_ENABLE_COMPRESSION: true
AWS_LWA_INVOKE_MODE: RESPONSE_STREAM
DATA_FOLDER: !Sub s3://${DataBucket}
TMP_FOLDER: /tmp
DATABASE_URL: !Sub dsql://${DSQLClusterId}.dsql.${AWS::Region}.on.aws
ENABLE_WEBSOCKET: false
DOMAIN: !If
- IsDomainAndCertificateSet
- !Ref Domain
- !Ref AWS::NoValue
SIGNUPS_ALLOWED: !Ref SignupsAllowed
IP_HEADER: X-Forwarded-For
ICON_SERVICE: !Ref IconService
ICON_REDIRECT_CODE: 301
ADMIN_TOKEN: !If
- IsAdminTokenEmpty
- !Ref AWS::NoValue
- !Ref AdminToken
SMTP_FROM: !If
- IsEmailEnabled
- !Ref SMTPFrom
- !Ref AWS::NoValue
SMTP_FROM_NAME: !Ref SMTPFromName
USE_AWS_SES: true
FunctionName: !Sub ${AWS::StackName}-api
Handler: bootstrap
Layers:
- !Sub arn:aws:lambda:${AWS::Region}:753240598075:layer:LambdaAdapterLayerArm64:24
MemorySize: 3008 # Maximum value allowed for new accounts, higher value reduces cold start times, should still fit under free tier usage for personal use
Role: !GetAtt ApiFunctionRole.Arn
Runtime: provided.al2023
Timeout: 300
ApiFunctionLogs:
Type: AWS::Logs::LogGroup
DeletionPolicy: RetainExceptOnCreate
Properties:
LogGroupName: !Sub /aws/lambda/${ApiFunction}
RetentionInDays: !If
- IsApiLogRetentionNeverExpire
- !Ref AWS::NoValue
- !Ref APILogRetention
ApiFunctionUrl:
Type: AWS::Lambda::Url
Properties:
TargetFunctionArn: !Ref ApiFunction
AuthType: NONE
InvokeMode: RESPONSE_STREAM
ApiFunctionUrlPublicPermissions:
Type: AWS::Lambda::Permission
Properties:
Action: lambda:InvokeFunctionUrl
FunctionName: !Ref ApiFunction
Principal: '*'
FunctionUrlAuthType: NONE
WebVaultAssetsBucket:
Type: AWS::S3::Bucket
Properties:
BucketName: !Sub ${AWS::StackName}-${AWS::AccountId}-${AWS::Region}-web-vault
PublicAccessBlockConfiguration:
BlockPublicAcls: true
BlockPublicPolicy: true
IgnorePublicAcls: true
RestrictPublicBuckets: true
WebVaultAssetsBucketEnforceEncryptionInTransitAndStorageTier:
Type: AWS::S3::BucketPolicy
Properties:
Bucket: !Ref WebVaultAssetsBucket
PolicyDocument:
Version: '2012-10-17'
Statement:
- Sid: DenyUnencryptedTransit
Effect: Deny
Principal: '*'
Action: s3:*
Resource:
- !Sub arn:${AWS::Partition}:s3:::${WebVaultAssetsBucket}
- !Sub arn:${AWS::Partition}:s3:::${WebVaultAssetsBucket}/*
Condition:
Bool:
aws:SecureTransport: false
- Sid: DenyNonIntelligentTieringStorageClass
Effect: Deny
Principal: '*'
Action: s3:PutObject
Resource: !Sub arn:aws:s3:::${WebVaultAssetsBucket}/*
Condition:
StringNotEquals:
s3:x-amz-storage-class: INTELLIGENT_TIERING
WebVaultAssetsBucketOriginAccessControl:
Type: AWS::CloudFront::OriginAccessControl
Properties:
OriginAccessControlConfig:
Name: !Sub ${AWS::StackName}-${AWS::Region}-web-vault-access-control
OriginAccessControlOriginType: s3
SigningBehavior: always
SigningProtocol: sigv4
# The following mirrors the header values in util.rs
ResponseHeaderPolicy:
Type: AWS::CloudFront::ResponseHeadersPolicy
Properties:
ResponseHeadersPolicyConfig:
Name: !Sub ${AWS::StackName}-${AWS::Region}
CustomHeadersConfig:
Items:
- Header: Cache-Control
Override: false
Value: no-cache, no-store, max-age=0
- Header: X-Robots-Tag
Override: true
Value: noindex, nofollow
- Header: Permissions-Policy
Override: true
Value: accelerometer=(), ambient-light-sensor=(), autoplay=(), battery=(), camera=(), display-capture=(), document-domain=(), encrypted-media=(), execution-while-not-rendered=(), execution-while-out-of-viewport=(), fullscreen=(), geolocation=(), gyroscope=(), keyboard-map=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), screen-wake-lock=(), sync-xhr=(), usb=(), web-share=(), xr-spatial-tracking=()
SecurityHeadersConfig:
ContentSecurityPolicy:
ContentSecurityPolicy: !Sub
- >-
default-src 'self';
base-uri 'self';
form-action 'self';
object-src 'self' blob:;
script-src 'self' 'wasm-unsafe-eval';
style-src 'self' 'unsafe-inline';
child-src 'self' https://*.duosecurity.com https://*.duofederal.com;
frame-src 'self' https://*.duosecurity.com https://*.duofederal.com;
frame-ancestors 'self'
chrome-extension://nngceckbapebfimnlniiiahkandclblb
chrome-extension://jbkfoedolllekgbhcbcoahefnbanhhlh
moz-extension://*;
img-src 'self' data:
https://haveibeenpwned.com
${IconServiceCSP};
connect-src 'self'
https://api.pwnedpasswords.com
https://api.2fa.directory
https://app.simplelogin.io/api/
https://app.addy.io/api/
https://api.fastmail.com/
https://api.forwardemail.net
https://${DataBucket.RegionalDomainName};
- IconServiceCSP: !If
- IconSourceIsPredefined
- !FindInMap [IconSource, !Ref IconService, CSP]
- !Select
- 0
- !Split ['{', !Ref IconService]
Override: true
ContentTypeOptions:
Override: true
FrameOptions:
FrameOption: SAMEORIGIN
Override: true
ReferrerPolicy:
Override: true
ReferrerPolicy: same-origin
StrictTransportSecurity:
AccessControlMaxAgeSec: 63072000
IncludeSubdomains: true
Override: true
Preload: true
XSSProtection:
Override: true
Protection: false
# The following mirrors the header values in util.rs
ConnectorHtmlResponseHeaderPolicy:
Type: AWS::CloudFront::ResponseHeadersPolicy
Properties:
ResponseHeadersPolicyConfig:
Name: !Sub ${AWS::StackName}-${AWS::Region}-connector-html
CustomHeadersConfig:
Items:
- Header: Cache-Control
Override: true
Value: no-cache, no-store, max-age=0
- Header: X-Robots-Tag
Override: true
Value: noindex, nofollow
- Header: Permissions-Policy
Override: true
Value: accelerometer=(), ambient-light-sensor=(), autoplay=(), battery=(), camera=(), display-capture=(), document-domain=(), encrypted-media=(), execution-while-not-rendered=(), execution-while-out-of-viewport=(), fullscreen=(), geolocation=(), gyroscope=(), keyboard-map=(), magnetometer=(), microphone=(), midi=(), payment=(), picture-in-picture=(), screen-wake-lock=(), sync-xhr=(), usb=(), web-share=(), xr-spatial-tracking=()
SecurityHeadersConfig:
ContentTypeOptions:
Override: true
ReferrerPolicy:
Override: true
ReferrerPolicy: same-origin
StrictTransportSecurity:
AccessControlMaxAgeSec: 63072000
IncludeSubdomains: true
Override: true
Preload: true
XSSProtection:
Override: true
Protection: false
CDN:
Type: AWS::CloudFront::Distribution
Properties:
DistributionConfig:
Aliases: !If
- IsDomainAndCertificateSet
- - !Select
- 2
- !Split
- /
- !Ref Domain
- !Ref AWS::NoValue
CacheBehaviors:
- AllowedMethods:
- DELETE
- HEAD
- GET
- OPTIONS
- PATCH
- POST
- PUT
CachePolicyId: 4135ea2d-6df8-44a3-9df3-4b5a84be39ad # CachingDisabled
Compress: true
OriginRequestPolicyId: b689b0a8-53d0-40ab-baf2-68738e2966ac # AllViewerExceptHostHeader
PathPattern: /api/*
ResponseHeadersPolicyId: !Ref ResponseHeaderPolicy
TargetOriginId: Api
ViewerProtocolPolicy: redirect-to-https
- AllowedMethods:
- DELETE
- HEAD
- GET
- OPTIONS
- PATCH
- POST
- PUT
CachePolicyId: 4135ea2d-6df8-44a3-9df3-4b5a84be39ad # CachingDisabled
Compress: true
OriginRequestPolicyId: b689b0a8-53d0-40ab-baf2-68738e2966ac # AllViewerExceptHostHeader
PathPattern: /admin
ResponseHeadersPolicyId: !Ref ResponseHeaderPolicy
TargetOriginId: Api
ViewerProtocolPolicy: redirect-to-https
- AllowedMethods:
- DELETE
- HEAD
- GET
- OPTIONS
- PATCH
- POST
- PUT
CachePolicyId: 4135ea2d-6df8-44a3-9df3-4b5a84be39ad # CachingDisabled
Compress: true
OriginRequestPolicyId: b689b0a8-53d0-40ab-baf2-68738e2966ac # AllViewerExceptHostHeader
PathPattern: /admin/*
ResponseHeadersPolicyId: !Ref ResponseHeaderPolicy
TargetOriginId: Api
ViewerProtocolPolicy: redirect-to-https
- AllowedMethods:
- DELETE
- HEAD
- GET
- OPTIONS
- PATCH
- POST
- PUT
CachePolicyId: 4135ea2d-6df8-44a3-9df3-4b5a84be39ad # CachingDisabled
Compress: true
OriginRequestPolicyId: b689b0a8-53d0-40ab-baf2-68738e2966ac # AllViewerExceptHostHeader
PathPattern: /events/*
ResponseHeadersPolicyId: !Ref ResponseHeaderPolicy
TargetOriginId: Api
ViewerProtocolPolicy: redirect-to-https
- AllowedMethods:
- DELETE
- HEAD
- GET
- OPTIONS
- PATCH
- POST
- PUT
CachePolicyId: 4135ea2d-6df8-44a3-9df3-4b5a84be39ad # CachingDisabled
Compress: true
OriginRequestPolicyId: b689b0a8-53d0-40ab-baf2-68738e2966ac # AllViewerExceptHostHeader
PathPattern: /identity/*
ResponseHeadersPolicyId: !Ref ResponseHeaderPolicy
TargetOriginId: Api
ViewerProtocolPolicy: redirect-to-https
- CachePolicyId: 4135ea2d-6df8-44a3-9df3-4b5a84be39ad # CachingDisabled
Compress: true
OriginRequestPolicyId: b689b0a8-53d0-40ab-baf2-68738e2966ac # AllViewerExceptHostHeader
PathPattern: /css/*
ResponseHeadersPolicyId: !Ref ResponseHeaderPolicy
TargetOriginId: Api
ViewerProtocolPolicy: redirect-to-https
- CachePolicyId: 4135ea2d-6df8-44a3-9df3-4b5a84be39ad # CachingDisabled
Compress: true
OriginRequestPolicyId: b689b0a8-53d0-40ab-baf2-68738e2966ac # AllViewerExceptHostHeader
PathPattern: /vw_static/*
ResponseHeadersPolicyId: !Ref ResponseHeaderPolicy
TargetOriginId: Api
ViewerProtocolPolicy: redirect-to-https
- CachePolicyId: 658327ea-f89d-4fab-a63d-7e88639e58f6 # CachingOptimized
Compress: true
OriginRequestPolicyId: b689b0a8-53d0-40ab-baf2-68738e2966ac # AllViewerExceptHostHeader
PathPattern: /icons/*
ResponseHeadersPolicyId: !Ref ResponseHeaderPolicy
TargetOriginId: Api
ViewerProtocolPolicy: redirect-to-https
- CachePolicyId: 4135ea2d-6df8-44a3-9df3-4b5a84be39ad # CachingDisabled
Compress: true
PathPattern: '*.html'
ResponseHeadersPolicyId: !Ref ResponseHeaderPolicy
TargetOriginId: WebVaultAssetsBucket
ViewerProtocolPolicy: redirect-to-https
- CachePolicyId: 4135ea2d-6df8-44a3-9df3-4b5a84be39ad # CachingDisabled
Compress: true
PathPattern: '*connector.html'
ResponseHeadersPolicyId: !Ref ConnectorHtmlResponseHeaderPolicy
TargetOriginId: WebVaultAssetsBucket
ViewerProtocolPolicy: redirect-to-https
Comment: Vaultwarden CDN
CustomErrorResponses:
- ErrorCode: 403
ResponseCode: 200
ResponsePagePath: /404.html
DefaultCacheBehavior:
CachePolicyId: 658327ea-f89d-4fab-a63d-7e88639e58f6 # CachingOptimized
Compress: true
ResponseHeadersPolicyId: !Ref ResponseHeaderPolicy
TargetOriginId: WebVaultAssetsBucket
ViewerProtocolPolicy: redirect-to-https
DefaultRootObject: index.html
Enabled: true
HttpVersion: http2and3
IPV6Enabled: true
Origins:
- Id: WebVaultAssetsBucket
DomainName: !GetAtt WebVaultAssetsBucket.RegionalDomainName
OriginAccessControlId: !GetAtt WebVaultAssetsBucketOriginAccessControl.Id
S3OriginConfig:
OriginAccessIdentity: ''
- Id: Api
CustomOriginConfig:
OriginProtocolPolicy: https-only
OriginSSLProtocols:
- TLSv1.2
DomainName: !Select
- 2
- !Split
- /
- !GetAtt ApiFunctionUrl.FunctionUrl
PriceClass: PriceClass_All
ViewerCertificate: !If
- IsDomainAndCertificateSet
- AcmCertificateArn: !Ref ACMCertificateArn
MinimumProtocolVersion: TLSv1.2_2021
SslSupportMethod: sni-only
- !Ref AWS::NoValue
WebVaultAssetsBucketPolicyCloudFrontAccess:
Type: AWS::S3::BucketPolicy
Properties:
Bucket: !Ref WebVaultAssetsBucket
PolicyDocument:
Id: CloudFrontAccess
Version: '2012-10-17'
Statement:
- Principal:
Service: !Sub cloudfront.${AWS::URLSuffix}
Action: s3:GetObject
Effect: Allow
Resource: !Sub ${WebVaultAssetsBucket.Arn}/*
Condition:
StringEquals:
AWS:SourceArn: !Sub arn:${AWS::Partition}:cloudfront::${AWS::AccountId}:distribution/${CDN.Id}
Outputs:
WebVaultAssetsBucket:
Value: !Ref WebVaultAssetsBucket
CDNDomain:
Value: !GetAtt CDN.DomainName

build.rs

@ -9,10 +9,18 @@ fn main() {
println!("cargo:rustc-cfg=mysql");
#[cfg(feature = "postgresql")]
println!("cargo:rustc-cfg=postgresql");
#[cfg(feature = "dsql")]
println!("cargo:rustc-cfg=dsql");
#[cfg(feature = "query_logger")]
println!("cargo:rustc-cfg=query_logger");
#[cfg(feature = "s3")]
println!("cargo:rustc-cfg=s3");
#[cfg(feature = "ses")]
println!("cargo:rustc-cfg=ses");
#[cfg(feature = "aws")]
println!("cargo:rustc-cfg=aws");
#[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
#[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql", feature = "dsql")))]
compile_error!(
"You need to enable one DB backend. To build with previous defaults do: cargo build --features sqlite"
);
@ -22,7 +30,11 @@ fn main() {
println!("cargo::rustc-check-cfg=cfg(sqlite)");
println!("cargo::rustc-check-cfg=cfg(mysql)");
println!("cargo::rustc-check-cfg=cfg(postgresql)");
println!("cargo::rustc-check-cfg=cfg(dsql)");
println!("cargo::rustc-check-cfg=cfg(query_logger)");
println!("cargo::rustc-check-cfg=cfg(s3)");
println!("cargo::rustc-check-cfg=cfg(ses)");
println!("cargo::rustc-check-cfg=cfg(aws)");
// Rerun when these paths are changed.
// Someone could have checked-out a tag or specific commit, but no other files changed.

(new file: Diesel migration metadata)

@ -0,0 +1 @@
run_in_transaction = false

(new file: initial DSQL schema migration)

@ -0,0 +1,281 @@
CREATE TABLE attachments (
id text NOT NULL PRIMARY KEY,
cipher_uuid character varying(40) NOT NULL,
file_name text NOT NULL,
file_size bigint NOT NULL,
akey text
);
CREATE TABLE auth_requests (
uuid character(36) NOT NULL PRIMARY KEY,
user_uuid character(36) NOT NULL,
organization_uuid character(36),
request_device_identifier character(36) NOT NULL,
device_type integer NOT NULL,
request_ip text NOT NULL,
response_device_id character(36),
access_code text NOT NULL,
public_key text NOT NULL,
enc_key text,
master_password_hash text,
approved boolean,
creation_date timestamp without time zone NOT NULL,
response_date timestamp without time zone,
authentication_date timestamp without time zone
);
CREATE TABLE ciphers (
uuid character varying(40) NOT NULL PRIMARY KEY,
created_at timestamp without time zone NOT NULL,
updated_at timestamp without time zone NOT NULL,
user_uuid character varying(40),
organization_uuid character varying(40),
atype integer NOT NULL,
name text NOT NULL,
notes text,
fields text,
data text NOT NULL,
password_history text,
deleted_at timestamp without time zone,
reprompt integer,
key text
);
CREATE TABLE ciphers_collections (
cipher_uuid character varying(40) NOT NULL,
collection_uuid character varying(40) NOT NULL,
PRIMARY KEY (cipher_uuid, collection_uuid)
);
CREATE TABLE collections (
uuid character varying(40) NOT NULL PRIMARY KEY,
org_uuid character varying(40) NOT NULL,
name text NOT NULL,
external_id text
);
CREATE TABLE collections_groups (
collections_uuid character varying(40) NOT NULL,
groups_uuid character(36) NOT NULL,
read_only boolean NOT NULL,
hide_passwords boolean NOT NULL,
PRIMARY KEY (collections_uuid, groups_uuid)
);
CREATE TABLE devices (
uuid character varying(40) NOT NULL,
created_at timestamp without time zone NOT NULL,
updated_at timestamp without time zone NOT NULL,
user_uuid character varying(40) NOT NULL,
name text NOT NULL,
atype integer NOT NULL,
push_token text,
refresh_token text NOT NULL,
twofactor_remember text,
push_uuid text,
PRIMARY KEY (uuid, user_uuid)
);
CREATE TABLE emergency_access (
uuid character(36) NOT NULL PRIMARY KEY,
grantor_uuid character(36),
grantee_uuid character(36),
email character varying(255),
key_encrypted text,
atype integer NOT NULL,
status integer NOT NULL,
wait_time_days integer NOT NULL,
recovery_initiated_at timestamp without time zone,
last_notification_at timestamp without time zone,
updated_at timestamp without time zone NOT NULL,
created_at timestamp without time zone NOT NULL
);
CREATE TABLE event (
uuid character(36) NOT NULL PRIMARY KEY,
event_type integer NOT NULL,
user_uuid character(36),
org_uuid character(36),
cipher_uuid character(36),
collection_uuid character(36),
group_uuid character(36),
org_user_uuid character(36),
act_user_uuid character(36),
device_type integer,
ip_address text,
event_date timestamp without time zone NOT NULL,
policy_uuid character(36),
provider_uuid character(36),
provider_user_uuid character(36),
provider_org_uuid character(36)
);
CREATE TABLE favorites (
user_uuid character varying(40) NOT NULL,
cipher_uuid character varying(40) NOT NULL,
PRIMARY KEY (user_uuid, cipher_uuid)
);
CREATE TABLE folders (
uuid character varying(40) NOT NULL PRIMARY KEY,
created_at timestamp without time zone NOT NULL,
updated_at timestamp without time zone NOT NULL,
user_uuid character varying(40) NOT NULL,
name text NOT NULL
);
CREATE TABLE folders_ciphers (
cipher_uuid character varying(40) NOT NULL,
folder_uuid character varying(40) NOT NULL,
PRIMARY KEY (cipher_uuid, folder_uuid)
);
CREATE TABLE groups (
uuid character(36) NOT NULL PRIMARY KEY,
organizations_uuid character varying(40) NOT NULL,
name character varying(100) NOT NULL,
access_all boolean NOT NULL,
external_id character varying(300),
creation_date timestamp without time zone NOT NULL,
revision_date timestamp without time zone NOT NULL
);
CREATE TABLE groups_users (
groups_uuid character(36) NOT NULL,
users_organizations_uuid character varying(36) NOT NULL,
PRIMARY KEY (groups_uuid, users_organizations_uuid)
);
CREATE TABLE invitations (
email text NOT NULL PRIMARY KEY
);
CREATE TABLE org_policies (
uuid character(36) NOT NULL PRIMARY KEY,
org_uuid character(36) NOT NULL,
atype integer NOT NULL,
enabled boolean NOT NULL,
data text NOT NULL,
UNIQUE (org_uuid, atype)
);
CREATE TABLE organization_api_key (
uuid character(36) NOT NULL,
org_uuid character(36) NOT NULL,
atype integer NOT NULL,
api_key character varying(255),
revision_date timestamp without time zone NOT NULL,
PRIMARY KEY (uuid, org_uuid)
);
CREATE TABLE organizations (
uuid character varying(40) NOT NULL PRIMARY KEY,
name text NOT NULL,
billing_email text NOT NULL,
private_key text,
public_key text
);
CREATE TABLE sends (
uuid character(36) NOT NULL PRIMARY KEY,
user_uuid character(36),
organization_uuid character(36),
name text NOT NULL,
notes text,
atype integer NOT NULL,
data text NOT NULL,
akey text NOT NULL,
password_hash bytea,
password_salt bytea,
password_iter integer,
max_access_count integer,
access_count integer NOT NULL,
creation_date timestamp without time zone NOT NULL,
revision_date timestamp without time zone NOT NULL,
expiration_date timestamp without time zone,
deletion_date timestamp without time zone NOT NULL,
disabled boolean NOT NULL,
hide_email boolean
);
CREATE TABLE twofactor (
uuid character varying(40) NOT NULL PRIMARY KEY,
user_uuid character varying(40) NOT NULL,
atype integer NOT NULL,
enabled boolean NOT NULL,
data text NOT NULL,
last_used bigint DEFAULT 0 NOT NULL,
UNIQUE (user_uuid, atype)
);
CREATE TABLE twofactor_duo_ctx (
state character varying(64) NOT NULL PRIMARY KEY,
user_email character varying(255) NOT NULL,
nonce character varying(64) NOT NULL,
exp bigint NOT NULL
);
CREATE TABLE twofactor_incomplete (
user_uuid character varying(40) NOT NULL,
device_uuid character varying(40) NOT NULL,
device_name text NOT NULL,
login_time timestamp without time zone NOT NULL,
ip_address text NOT NULL,
device_type integer DEFAULT 14 NOT NULL,
PRIMARY KEY (user_uuid, device_uuid)
);
CREATE TABLE users (
uuid character varying(40) NOT NULL PRIMARY KEY,
created_at timestamp without time zone NOT NULL,
updated_at timestamp without time zone NOT NULL,
email text NOT NULL UNIQUE,
name text NOT NULL,
password_hash bytea NOT NULL,
salt bytea NOT NULL,
password_iterations integer NOT NULL,
password_hint text,
akey text NOT NULL,
private_key text,
public_key text,
totp_secret text,
totp_recover text,
security_stamp text NOT NULL,
equivalent_domains text NOT NULL,
excluded_globals text NOT NULL,
client_kdf_type integer DEFAULT 0 NOT NULL,
client_kdf_iter integer DEFAULT 100000 NOT NULL,
verified_at timestamp without time zone,
last_verifying_at timestamp without time zone,
login_verify_count integer DEFAULT 0 NOT NULL,
email_new character varying(255) DEFAULT NULL::character varying,
email_new_token character varying(16) DEFAULT NULL::character varying,
enabled boolean DEFAULT true NOT NULL,
stamp_exception text,
api_key text,
avatar_color text,
client_kdf_memory integer,
client_kdf_parallelism integer,
external_id text
);
CREATE TABLE users_collections (
user_uuid character varying(40) NOT NULL,
collection_uuid character varying(40) NOT NULL,
read_only boolean DEFAULT false NOT NULL,
hide_passwords boolean DEFAULT false NOT NULL,
PRIMARY KEY (user_uuid, collection_uuid)
);
CREATE TABLE users_organizations (
uuid character varying(40) NOT NULL PRIMARY KEY,
user_uuid character varying(40) NOT NULL,
org_uuid character varying(40) NOT NULL,
access_all boolean NOT NULL,
akey text NOT NULL,
status integer NOT NULL,
atype integer NOT NULL,
reset_password_key text,
external_id text,
UNIQUE (user_uuid, org_uuid)
);

(new file: Diesel migration metadata)

@ -0,0 +1 @@
run_in_transaction = false

(new file: DSQL migration adding `manage` columns)

@ -0,0 +1,8 @@
-- DSQL preview can't add columns with constraints, dropping `NOT NULL DEFAULT FALSE` constraint
-- It appears Diesel will ensure the column has appropriate values when saving records.
ALTER TABLE users_collections
ADD COLUMN manage BOOLEAN;
ALTER TABLE collections_groups
ADD COLUMN manage BOOLEAN;

src/api/admin.rs

@ -745,17 +745,17 @@ fn get_diagnostics_http(code: u16, _token: AdminToken) -> EmptyResult {
}
#[post("/config", format = "application/json", data = "<data>")]
fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
async fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
let data: ConfigBuilder = data.into_inner();
if let Err(e) = CONFIG.update_config(data, true) {
if let Err(e) = CONFIG.update_config(data, true).await {
err!(format!("Unable to save config: {e:?}"))
}
Ok(())
}
#[post("/config/delete", format = "application/json")]
fn delete_config(_token: AdminToken) -> EmptyResult {
if let Err(e) = CONFIG.delete_user_config() {
async fn delete_config(_token: AdminToken) -> EmptyResult {
if let Err(e) = CONFIG.delete_user_config().await {
err!(format!("Unable to delete config: {e:?}"))
}
Ok(())

src/api/core/ciphers.rs

@ -17,6 +17,7 @@ use crate::{
auth::Headers,
crypto,
db::{models::*, DbConn, DbPool},
persistent_fs::{canonicalize, create_dir_all, persist_temp_file},
CONFIG,
};
@ -110,7 +111,7 @@ async fn sync(
headers: Headers,
client_version: Option<ClientVersion>,
mut conn: DbConn,
) -> Json<Value> {
) -> JsonResult {
let user_json = headers.user.to_json(&mut conn).await;
// Get all ciphers which are visible by the user
@ -134,7 +135,7 @@ async fn sync(
for c in ciphers {
ciphers_json.push(
c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &mut conn)
.await,
.await?,
);
}
@ -159,7 +160,7 @@ async fn sync(
api::core::_get_eq_domains(headers, true).into_inner()
};
Json(json!({
Ok(Json(json!({
"profile": user_json,
"folders": folders_json,
"collections": collections_json,
@ -168,11 +169,11 @@ async fn sync(
"domains": domains_json,
"sends": sends_json,
"object": "sync"
}))
})))
}
#[get("/ciphers")]
async fn get_ciphers(headers: Headers, mut conn: DbConn) -> Json<Value> {
async fn get_ciphers(headers: Headers, mut conn: DbConn) -> JsonResult {
let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &mut conn).await;
let cipher_sync_data = CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &mut conn).await;
@ -180,15 +181,15 @@ async fn get_ciphers(headers: Headers, mut conn: DbConn) -> Json<Value> {
for c in ciphers {
ciphers_json.push(
c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &mut conn)
.await,
.await?,
);
}
Json(json!({
Ok(Json(json!({
"data": ciphers_json,
"object": "list",
"continuationToken": null
}))
})))
}
#[get("/ciphers/<cipher_id>")]
@ -201,7 +202,7 @@ async fn get_cipher(cipher_id: CipherId, headers: Headers, mut conn: DbConn) ->
err!("Cipher is not owned by user")
}
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
}
#[get("/ciphers/<cipher_id>/admin")]
@ -339,7 +340,7 @@ async fn post_ciphers(data: Json<CipherData>, headers: Headers, mut conn: DbConn
let mut cipher = Cipher::new(data.r#type, data.name.clone());
update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
}
/// Enforces the personal ownership policy on user-owned ciphers, if applicable.
@ -676,7 +677,7 @@ async fn put_cipher(
update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
}
#[post("/ciphers/<cipher_id>/partial", data = "<data>")]
@ -714,7 +715,7 @@ async fn put_cipher_partial(
// Update favorite
cipher.set_favorite(Some(data.favorite), &headers.user.uuid, &mut conn).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
}
#[derive(Deserialize)]
@ -825,7 +826,7 @@ async fn post_collections_update(
)
.await;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
}
#[put("/ciphers/<cipher_id>/collections-admin", data = "<data>")]
@ -1030,7 +1031,7 @@ async fn share_cipher_by_uuid(
update_cipher_from_data(&mut cipher, data.cipher, headers, Some(shared_to_collections), conn, nt, ut).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
}
/// v2 API for downloading an attachment. This just redirects the client to
@ -1055,7 +1056,7 @@ async fn get_attachment(
}
match Attachment::find_by_id(&attachment_id, &mut conn).await {
Some(attachment) if cipher_id == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host))),
Some(attachment) if cipher_id == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host).await?)),
Some(_) => err!("Attachment doesn't belong to cipher"),
None => err!("Attachment doesn't exist"),
}
@ -1116,7 +1117,7 @@ async fn post_attachment_v2(
"attachmentId": attachment_id,
"url": url,
"fileUploadType": FileUploadType::Direct as i32,
response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await,
response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?,
})))
}
@ -1142,7 +1143,7 @@ async fn save_attachment(
mut conn: DbConn,
nt: Notify<'_>,
) -> Result<(Cipher, DbConn), crate::error::Error> {
let mut data = data.into_inner();
let data = data.into_inner();
let Some(size) = data.data.len().to_i64() else {
err!("Attachment data size overflow");
@ -1269,13 +1270,11 @@ async fn save_attachment(
attachment.save(&mut conn).await.expect("Error saving attachment");
}
let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_id.as_ref());
let folder_path = canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_id.as_ref());
let file_path = folder_path.join(file_id.as_ref());
tokio::fs::create_dir_all(&folder_path).await?;
if let Err(_err) = data.data.persist_to(&file_path).await {
data.data.move_copy_to(file_path).await?
}
create_dir_all(&folder_path).await?;
persist_temp_file(data.data, file_path).await?;
nt.send_cipher_update(
UpdateType::SyncCipherUpdate,
@ -1342,7 +1341,7 @@ async fn post_attachment(
let (cipher, mut conn) = save_attachment(attachment, cipher_id, data, &headers, conn, nt).await?;
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
}
#[post("/ciphers/<cipher_id>/attachment-admin", format = "multipart/form-data", data = "<data>")]
@ -1786,7 +1785,7 @@ async fn _restore_cipher_by_uuid(
.await;
}
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await))
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
}
async fn _restore_multiple_ciphers(

src/api/core/emergency_access.rs

@ -582,7 +582,7 @@ async fn view_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut
CipherSyncType::User,
&mut conn,
)
.await,
.await?,
);
}

src/api/core/organizations.rs

@ -15,6 +15,7 @@ use crate::{
OwnerHeaders,
},
db::{models::*, DbConn},
error::Error,
mail,
util::{convert_json_key_lcase_first, NumberOrString},
CONFIG,
@ -901,21 +902,21 @@ async fn get_org_details(data: OrgIdData, headers: OrgMemberHeaders, mut conn: D
}
Ok(Json(json!({
"data": _get_org_details(&data.organization_id, &headers.host, &headers.user.uuid, &mut conn).await,
"data": _get_org_details(&data.organization_id, &headers.host, &headers.user.uuid, &mut conn).await?,
"object": "list",
"continuationToken": null,
})))
}
async fn _get_org_details(org_id: &OrganizationId, host: &str, user_id: &UserId, conn: &mut DbConn) -> Value {
async fn _get_org_details(org_id: &OrganizationId, host: &str, user_id: &UserId, conn: &mut DbConn) -> Result<Value, Error> {
let ciphers = Cipher::find_by_org(org_id, conn).await;
let cipher_sync_data = CipherSyncData::new(user_id, CipherSyncType::Organization, conn).await;
let mut ciphers_json = Vec::with_capacity(ciphers.len());
for c in ciphers {
ciphers_json.push(c.to_json(host, user_id, Some(&cipher_sync_data), CipherSyncType::Organization, conn).await);
ciphers_json.push(c.to_json(host, user_id, Some(&cipher_sync_data), CipherSyncType::Organization, conn).await?);
}
json!(ciphers_json)
Ok(json!(ciphers_json))
}
#[derive(FromForm)]
@ -3317,7 +3318,7 @@ async fn get_org_export(
"continuationToken": null,
},
"ciphers": {
"data": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await),
"data": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await?),
"object": "list",
"continuationToken": null,
}
@ -3326,7 +3327,7 @@ async fn get_org_export(
// v2023.1.0 and newer response
Ok(Json(json!({
"collections": convert_json_key_lcase_first(_get_org_collections(&org_id, &mut conn).await),
"ciphers": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await),
"ciphers": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await?),
})))
}
}

src/api/core/sends.rs

@ -12,6 +12,8 @@ use crate::{
api::{ApiResult, EmptyResult, JsonResult, Notify, UpdateType},
auth::{ClientIp, Headers, Host},
db::{models::*, DbConn, DbPool},
error::Error,
persistent_fs::{canonicalize, create_dir_all, download_url, file_exists, persist_temp_file},
util::NumberOrString,
CONFIG,
};
@ -210,7 +212,7 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
let UploadData {
model,
mut data,
data,
} = data.into_inner();
let model = model.into_inner();
@ -250,13 +252,11 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
}
let file_id = crate::crypto::generate_send_file_id();
let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
let folder_path = canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
let file_path = folder_path.join(&file_id);
tokio::fs::create_dir_all(&folder_path).await?;
if let Err(_err) = data.persist_to(&file_path).await {
data.move_copy_to(file_path).await?
}
create_dir_all(&folder_path).await?;
persist_temp_file(data, file_path).await?;
let mut data_value: Value = serde_json::from_str(&send.data)?;
if let Some(o) = data_value.as_object_mut() {
@ -363,7 +363,7 @@ async fn post_send_file_v2_data(
) -> EmptyResult {
enforce_disable_send_policy(&headers, &mut conn).await?;
let mut data = data.into_inner();
let data = data.into_inner();
let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
err!("Send not found. Unable to save the file.", "Invalid send uuid or does not belong to user.")
@ -406,19 +406,18 @@ async fn post_send_file_v2_data(
err!("Send file size does not match.", format!("Expected a file size of {} got {size}", send_data.size));
}
let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_id);
let folder_path = canonicalize(&CONFIG.sends_folder()).await?.join(send_id);
let file_path = folder_path.join(file_id);
// Check if the file already exists, if that is the case do not overwrite it
if tokio::fs::metadata(&file_path).await.is_ok() {
err!("Send file has already been uploaded.", format!("File {file_path:?} already exists"))
match file_exists(&file_path).await {
Ok(true) => err!("Send file has already been uploaded.", format!("File {file_path:?} already exists")),
Ok(false) => (),
Err(e) => err!("Error creating send file.", format!("Error checking if send file {file_path:?} already exists: {e}")),
}
tokio::fs::create_dir_all(&folder_path).await?;
if let Err(_err) = data.data.persist_to(&file_path).await {
data.data.move_copy_to(file_path).await?
}
create_dir_all(&folder_path).await?;
persist_temp_file(data.data, file_path).await?;
nt.send_send_update(
UpdateType::SyncSendCreate,
@ -551,12 +550,22 @@ async fn post_access_file(
)
.await;
let token_claims = crate::auth::generate_send_claims(&send_id, &file_id);
let token = crate::auth::encode_jwt(&token_claims);
let file_path = canonicalize(&CONFIG.sends_folder())
.await?
.join(&send_id)
.join(&file_id);
let url = download_url(file_path, &host.host)
.await
.map_err(|e| Error::new(
"Failed to generate send download URL",
format!("Failed to generate send URL for send_id: {send_id}, file_id: {file_id}. Error: {e:?}")
))?;
Ok(Json(json!({
"object": "send-fileDownload",
"id": file_id,
"url": format!("{}/api/sends/{}/{}?t={}", &host.host, send_id, file_id, token)
"url": url
})))
}

src/api/core/two_factor/duo.rs

@ -258,7 +258,7 @@ pub(crate) async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiRes
}
.map_res("Can't fetch Duo Keys")?;
Ok((data.ik, data.sk, CONFIG.get_duo_akey(), data.host))
Ok((data.ik, data.sk, CONFIG.get_duo_akey().await, data.host))
}
pub async fn generate_duo_signature(email: &str, conn: &mut DbConn) -> ApiResult<(String, String)> {

src/api/icons.rs

@ -2,7 +2,7 @@ use std::{
collections::HashMap,
net::IpAddr,
sync::Arc,
time::{Duration, SystemTime},
time::Duration,
};
use bytes::{Bytes, BytesMut};
@ -14,15 +14,12 @@ use reqwest::{
Client, Response,
};
use rocket::{http::ContentType, response::Redirect, Route};
use tokio::{
fs::{create_dir_all, remove_file, symlink_metadata, File},
io::{AsyncReadExt, AsyncWriteExt},
};
use html5gum::{Emitter, HtmlString, Readable, StringReader, Tokenizer};
use crate::{
error::Error,
persistent_fs::{create_dir_all, file_is_expired, read, remove_file, write},
http_client::{get_reqwest_client_builder, should_block_address, CustomHttpClientError},
util::Cached,
CONFIG,
@ -207,23 +204,7 @@ async fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
}
// Try to read the cached icon, and return it if it exists
if let Ok(mut f) = File::open(path).await {
let mut buffer = Vec::new();
if f.read_to_end(&mut buffer).await.is_ok() {
return Some(buffer);
}
}
None
}
async fn file_is_expired(path: &str, ttl: u64) -> Result<bool, Error> {
let meta = symlink_metadata(path).await?;
let modified = meta.modified()?;
let age = SystemTime::now().duration_since(modified)?;
Ok(ttl > 0 && ttl <= age.as_secs())
read(path).await.ok()
}
async fn icon_is_negcached(path: &str) -> bool {
@ -569,13 +550,15 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
}
async fn save_icon(path: &str, icon: &[u8]) {
match File::create(path).await {
Ok(mut f) => {
f.write_all(icon).await.expect("Error writing icon file");
}
Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
match write(path, icon).await {
Ok(_) => (),
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
create_dir_all(&CONFIG.icon_cache_folder()).await.expect("Error creating icon cache folder");
}
if let Err(e) = write(path, icon).await {
warn!("Unable to save icon: {:?}", e);
}
},
Err(e) => {
warn!("Unable to save icon: {:?}", e);
}

src/auth.rs

@ -9,8 +9,6 @@ use serde::de::DeserializeOwned;
use serde::ser::Serialize;
use std::{
env,
fs::File,
io::{Read, Write},
net::IpAddr,
};
@ -18,7 +16,7 @@ use crate::db::models::{
AttachmentId, CipherId, CollectionId, DeviceId, EmergencyAccessId, MembershipId, OrgApiKeyId, OrganizationId,
SendFileId, SendId, UserId,
};
use crate::{error::Error, CONFIG};
use crate::{error::Error, CONFIG, persistent_fs::{read, write}};
const JWT_ALGORITHM: Algorithm = Algorithm::RS256;
@ -40,37 +38,31 @@ static JWT_REGISTER_VERIFY_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|regis
static PRIVATE_RSA_KEY: OnceCell<EncodingKey> = OnceCell::new();
static PUBLIC_RSA_KEY: OnceCell<DecodingKey> = OnceCell::new();
pub fn initialize_keys() -> Result<(), Error> {
fn read_key(create_if_missing: bool) -> Result<(Rsa<openssl::pkey::Private>, Vec<u8>), Error> {
let mut priv_key_buffer = Vec::with_capacity(2048);
let mut priv_key_file = File::options()
.create(create_if_missing)
.truncate(false)
.read(true)
.write(create_if_missing)
.open(CONFIG.private_rsa_key())?;
#[allow(clippy::verbose_file_reads)]
let bytes_read = priv_key_file.read_to_end(&mut priv_key_buffer)?;
let rsa_key = if bytes_read > 0 {
Rsa::private_key_from_pem(&priv_key_buffer[..bytes_read])?
} else if create_if_missing {
// Only create the key if the file doesn't exist or is empty
let rsa_key = Rsa::generate(2048)?;
priv_key_buffer = rsa_key.private_key_to_pem()?;
priv_key_file.write_all(&priv_key_buffer)?;
info!("Private key '{}' created correctly", CONFIG.private_rsa_key());
rsa_key
} else {
err!("Private key does not exist or invalid format", CONFIG.private_rsa_key());
pub async fn initialize_keys() -> Result<(), Error> {
async fn read_key(create_if_missing: bool) -> Result<(Rsa<openssl::pkey::Private>, Vec<u8>), std::io::Error> {
let priv_key_buffer = match read(&CONFIG.private_rsa_key()).await {
Ok(buffer) => Some(buffer),
Err(e) if e.kind() == std::io::ErrorKind::NotFound && create_if_missing => None,
Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Err(e),
Err(e) => return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Error reading private key: {e}"))),
};
Ok((rsa_key, priv_key_buffer))
if let Some(priv_key_buffer) = priv_key_buffer {
Ok((Rsa::private_key_from_pem(&priv_key_buffer)?, priv_key_buffer))
} else {
let rsa_key = Rsa::generate(2048)?;
let priv_key_buffer = rsa_key.private_key_to_pem()?;
write(&CONFIG.private_rsa_key(), &priv_key_buffer).await?;
info!("Private key '{}' created correctly", CONFIG.private_rsa_key());
Err(std::io::Error::new(std::io::ErrorKind::NotFound, "Private key created, forcing attempt to read it again"))
}
}
let (priv_key, priv_key_buffer) = read_key(true).or_else(|_| read_key(false))?;
let (priv_key, priv_key_buffer) = match read_key(true).await {
Ok(key) => key,
Err(e) if e.kind() == std::io::ErrorKind::NotFound => read_key(false).await?,
Err(e) => return Err(e.into()),
};
let pub_key_buffer = priv_key.public_key_to_pem()?;
let enc = EncodingKey::from_rsa_pem(&priv_key_buffer)?;

src/aws.rs (new file)

@ -0,0 +1,24 @@
use std::io::{Error, ErrorKind};
// Cache the AWS SDK config, as recommended by the AWS SDK documentation. The
// initial load is async, so we spawn a thread to load it and then join it to
// get the result in a blocking fashion.
static AWS_SDK_CONFIG: std::sync::LazyLock<std::io::Result<aws_config::SdkConfig>> = std::sync::LazyLock::new(|| {
std::thread::spawn(|| {
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()?;
std::io::Result::Ok(rt.block_on(aws_config::load_defaults(aws_config::BehaviorVersion::latest())))
})
.join()
.map_err(|e| Error::new(ErrorKind::Other, format!("Failed to load AWS config for DSQL connection: {e:#?}")))?
.map_err(|e| Error::new(ErrorKind::Other, format!("Failed to load AWS config for DSQL connection: {e}")))
});
pub(crate) fn aws_sdk_config() -> std::io::Result<&'static aws_config::SdkConfig> {
(*AWS_SDK_CONFIG).as_ref().map_err(|e| match e.get_ref() {
Some(inner) => Error::new(e.kind(), inner),
None => Error::from(e.kind()),
})
}

src/config.rs

@ -11,6 +11,7 @@ use job_scheduler_ng::Schedule;
use once_cell::sync::Lazy;
use reqwest::Url;
use crate::persistent_fs::{read, remove_file, write};
use crate::{
db::DbConnType,
error::Error,
@ -25,10 +26,26 @@ static CONFIG_FILE: Lazy<String> = Lazy::new(|| {
pub static SKIP_CONFIG_VALIDATION: AtomicBool = AtomicBool::new(false);
pub static CONFIG: Lazy<Config> = Lazy::new(|| {
Config::load().unwrap_or_else(|e| {
println!("Error loading config:\n {e:?}\n");
exit(12)
std::thread::spawn(|| {
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap_or_else(|e| {
println!("Error loading config:\n {e:?}\n");
exit(12)
});
rt.block_on(Config::load())
.unwrap_or_else(|e| {
println!("Error loading config:\n {e:?}\n");
exit(12)
})
})
.join()
.unwrap_or_else(|e| {
println!("Error loading config:\n {e:?}\n");
exit(12)
})
});
pub type Pass = String;
@ -110,8 +127,10 @@ macro_rules! make_config {
builder
}
fn from_file(path: &str) -> Result<Self, Error> {
let config_str = std::fs::read_to_string(path)?;
async fn from_file(path: &str) -> Result<Self, Error> {
let config_bytes = read(path).await?;
let config_str = String::from_utf8(config_bytes)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()))?;
println!("[INFO] Using saved config from `{path}` for configuration.\n");
serde_json::from_str(&config_str).map_err(Into::into)
}
@ -723,12 +742,14 @@ make_config! {
smtp_accept_invalid_certs: bool, true, def, false;
/// Accept Invalid Hostnames (Know the risks!) |> DANGEROUS: Allow invalid hostnames. This option introduces significant vulnerabilities to man-in-the-middle attacks!
smtp_accept_invalid_hostnames: bool, true, def, false;
/// Use AWS SES |> Whether to send mail via AWS Simple Email Service (SES)
use_aws_ses: bool, true, def, false;
},
/// Email 2FA Settings
email_2fa: _enable_email_2fa {
/// Enabled |> Disabling will prevent users from setting up new email 2FA and using existing email 2FA configured
_enable_email_2fa: bool, true, auto, |c| c._enable_smtp && (c.smtp_host.is_some() || c.use_sendmail);
_enable_email_2fa: bool, true, auto, |c| c._enable_smtp && (c.smtp_host.is_some() || c.use_sendmail || c.use_aws_ses);
/// Email token size |> Number of digits in an email 2FA token (min: 6, max: 255). Note that the Bitwarden clients are hardcoded to mention 6 digit codes regardless of this setting.
email_token_size: u8, true, def, 6;
/// Token expiration time |> Maximum time in seconds a token is valid. The time the user has to open email client and copy token.
@ -936,6 +957,9 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
}
}
}
} else if cfg.use_aws_ses {
#[cfg(not(ses))]
err!("`USE_AWS_SES` is set, but the `ses` feature is not enabled in this build");
} else {
if cfg.smtp_host.is_some() == cfg.smtp_from.is_empty() {
err!("Both `SMTP_HOST` and `SMTP_FROM` need to be set for email support without `USE_SENDMAIL`")
@ -946,7 +970,7 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
}
}
if (cfg.smtp_host.is_some() || cfg.use_sendmail) && !is_valid_email(&cfg.smtp_from) {
if (cfg.smtp_host.is_some() || cfg.use_sendmail || cfg.use_aws_ses) && !is_valid_email(&cfg.smtp_from) {
err!(format!("SMTP_FROM '{}' is not a valid email address", cfg.smtp_from))
}
@ -955,7 +979,7 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {
}
}
if cfg._enable_email_2fa && !(cfg.smtp_host.is_some() || cfg.use_sendmail) {
if cfg._enable_email_2fa && !(cfg.smtp_host.is_some() || cfg.use_sendmail || cfg.use_aws_ses) {
err!("To enable email 2FA, a mail transport must be configured")
}
@ -1133,10 +1157,10 @@ fn smtp_convert_deprecated_ssl_options(smtp_ssl: Option<bool>, smtp_explicit_tls
}
impl Config {
pub fn load() -> Result<Self, Error> {
pub async fn load() -> Result<Self, Error> {
// Loading from env and file
let _env = ConfigBuilder::from_env();
let _usr = ConfigBuilder::from_file(&CONFIG_FILE).unwrap_or_default();
let _usr = ConfigBuilder::from_file(&CONFIG_FILE).await.unwrap_or_default();
// Create merged config, config file overwrites env
let mut _overrides = Vec::new();
@ -1160,7 +1184,7 @@ impl Config {
})
}
pub fn update_config(&self, other: ConfigBuilder, ignore_non_editable: bool) -> Result<(), Error> {
pub async fn update_config(&self, other: ConfigBuilder, ignore_non_editable: bool) -> Result<(), Error> {
// Remove default values
//let builder = other.remove(&self.inner.read().unwrap()._env);
@ -1192,20 +1216,18 @@ impl Config {
}
//Save to file
use std::{fs::File, io::Write};
let mut file = File::create(&*CONFIG_FILE)?;
file.write_all(config_str.as_bytes())?;
write(&*CONFIG_FILE, config_str.as_bytes()).await?;
Ok(())
}
fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
async fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
let builder = {
let usr = &self.inner.read().unwrap()._usr;
let mut _overrides = Vec::new();
usr.merge(&other, false, &mut _overrides)
};
self.update_config(builder, false)
self.update_config(builder, false).await
}
/// Tests whether an email's domain is allowed. A domain is allowed if it
@ -1247,8 +1269,8 @@ impl Config {
}
}
pub fn delete_user_config(&self) -> Result<(), Error> {
std::fs::remove_file(&*CONFIG_FILE)?;
pub async fn delete_user_config(&self) -> Result<(), Error> {
remove_file(&*CONFIG_FILE).await?;
// Empty user config
let usr = ConfigBuilder::default();
@ -1275,10 +1297,10 @@ impl Config {
}
pub fn mail_enabled(&self) -> bool {
let inner = &self.inner.read().unwrap().config;
inner._enable_smtp && (inner.smtp_host.is_some() || inner.use_sendmail)
inner._enable_smtp && (inner.smtp_host.is_some() || inner.use_sendmail || inner.use_aws_ses)
}
pub fn get_duo_akey(&self) -> String {
pub async fn get_duo_akey(&self) -> String {
if let Some(akey) = self._duo_akey() {
akey
} else {
@ -1289,7 +1311,7 @@ impl Config {
_duo_akey: Some(akey_s.clone()),
..Default::default()
};
self.update_config_partial(builder).ok();
self.update_config_partial(builder).await.ok();
akey_s
}
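The loader above runs the now-async `Config::load()` from the synchronous `Lazy` initializer by blocking on a dedicated thread that owns its own current-thread Tokio runtime, so it never calls `block_on` from inside an already-running runtime. A minimal standalone sketch of that pattern, assuming only the `tokio` crate (the `load_value` stand-in and all names below are illustrative, not part of this commit):

```rust
use std::thread;

// Stand-in for an async, fallible loader such as Config::load().
async fn load_value() -> std::io::Result<u32> {
    Ok(42)
}

fn load_blocking() -> u32 {
    // A fresh OS thread guarantees block_on is never nested inside an
    // existing Tokio runtime; the current-thread runtime is cheap and only
    // lives for this single future.
    thread::spawn(|| {
        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .expect("failed to build runtime");
        rt.block_on(load_value()).expect("failed to load value")
    })
    .join()
    .expect("loader thread panicked")
}

fn main() {
    println!("loaded: {}", load_blocking());
}
```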

163
src/db/dsql.rs Normal file
View file

@ -0,0 +1,163 @@
use std::sync::RwLock;
use diesel::{
r2d2::{ManageConnection, R2D2Connection},
ConnectionError,
};
use url::Url;
#[derive(Debug)]
pub struct ConnectionManager<T> {
inner: RwLock<diesel::r2d2::ConnectionManager<T>>,
#[cfg(dsql)]
dsql_url: Option<String>,
}
impl<T> ConnectionManager<T> {
/// Returns a new connection manager,
/// which establishes connections to the given database URL.
pub fn new<S: Into<String>>(database_url: S) -> Self {
let database_url = database_url.into();
Self {
inner: RwLock::new(diesel::r2d2::ConnectionManager::new(&database_url)),
#[cfg(dsql)]
dsql_url: if database_url.starts_with("dsql:") {
Some(database_url)
} else {
None
},
}
}
}
impl<T> ManageConnection for ConnectionManager<T>
where
T: R2D2Connection + Send + 'static,
{
type Connection = T;
type Error = diesel::r2d2::Error;
fn connect(&self) -> Result<T, Self::Error> {
#[cfg(dsql)]
if let Some(dsql_url) = &self.dsql_url {
let url = psql_url(dsql_url).map_err(|e| Self::Error::ConnectionError(e))?;
self.inner.write().expect("Failed to lock inner connection manager to set DSQL connection URL").update_database_url(&url);
}
self.inner.read().expect("Failed to lock inner connection manager to connect").connect()
}
fn is_valid(&self, conn: &mut T) -> Result<(), Self::Error> {
self.inner.read().expect("Failed to lock inner connection manager to check validity").is_valid(conn)
}
fn has_broken(&self, conn: &mut T) -> bool {
self.inner.read().expect("Failed to lock inner connection manager to check if has broken").has_broken(conn)
}
}
// Generate a Postgres libpq connection string. The input connection string has
// the following format:
//
// dsql://<dsql-id>.dsql.<aws-region>.on.aws
//
// The generated connection string will have the form:
//
// postgresql://<dsql-id>.dsql.<aws-region>.on.aws/postgres?sslmode=require&user=admin&password=<auth-token>
//
// The auth token is a temporary token generated by the AWS SDK for DSQL. It is
// valid for up to 15 minutes. We cache the last-generated token for each unique
// DSQL connection URL, and reuse it if it is less than 14 minutes old.
pub(crate) fn psql_url(url: &str) -> Result<String, ConnectionError> {
use std::{
collections::HashMap,
sync::{Arc, LazyLock, Mutex},
time::Duration,
};
struct PsqlUrl {
timestamp: std::time::Instant,
url: String,
}
static PSQL_URLS: LazyLock<Mutex<HashMap<String, Arc<Mutex<Option<PsqlUrl>>>>>> = LazyLock::new(|| Mutex::new(HashMap::new()));
let mut psql_urls = PSQL_URLS.lock().map_err(|e| ConnectionError::BadConnection(format!("Failed to lock PSQL URLs: {e}")))?;
let psql_url_lock = if let Some(existing_psql_url_lock) = psql_urls.get(url) {
existing_psql_url_lock.clone()
} else {
let psql_url_lock = Arc::new(Mutex::new(None));
psql_urls.insert(url.to_string(), psql_url_lock.clone());
psql_url_lock
};
let mut psql_url_lock_guard = psql_url_lock.lock().map_err(|e| ConnectionError::BadConnection(format!("Failed to lock PSQL url: {e}")))?;
drop(psql_urls);
if let Some(ref psql_url) = *psql_url_lock_guard {
if psql_url.timestamp.elapsed() < Duration::from_secs(14 * 60) {
debug!("Reusing DSQL auth token for connection '{url}'");
return Ok(psql_url.url.clone());
}
info!("Refreshing DSQL auth token for connection '{url}'");
} else {
info!("Generating new DSQL auth token for connection '{url}'");
}
let sdk_config = crate::aws::aws_sdk_config()
.map_err(|e| ConnectionError::BadConnection(format!("Failed to load AWS SDK config: {e}")))?;
let mut psql_url = Url::parse(url).map_err(|e| {
ConnectionError::InvalidConnectionUrl(e.to_string())
})?;
let host = psql_url.host_str().ok_or(ConnectionError::InvalidConnectionUrl("Missing hostname in connection URL".to_string()))?.to_string();
static DSQL_REGION_FROM_HOST_RE: LazyLock<regex::Regex> = LazyLock::new(|| {
regex::Regex::new(r"^[a-z0-9]+\.dsql\.(?P<region>[a-z0-9-]+)\.on\.aws$").expect("Failed to compile DSQL region regex")
});
let region = (*DSQL_REGION_FROM_HOST_RE).captures(&host).ok_or(ConnectionError::InvalidConnectionUrl("Failed to find AWS region in DSQL hostname".to_string()))?
.name("region")
.ok_or(ConnectionError::InvalidConnectionUrl("Failed to find AWS region in DSQL hostname".to_string()))?
.as_str()
.to_string();
let region = aws_config::Region::new(region);
let auth_config = aws_sdk_dsql::auth_token::Config::builder()
.hostname(host)
.region(region)
.build()
.map_err(|e| ConnectionError::BadConnection(format!("Failed to build AWS auth token signer config: {e}")))?;
let signer = aws_sdk_dsql::auth_token::AuthTokenGenerator::new(auth_config);
let now = std::time::Instant::now();
let auth_token = std::thread::spawn(move || {
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()?;
rt.block_on(signer.db_connect_admin_auth_token(sdk_config))
})
.join()
.map_err(|e| ConnectionError::BadConnection(format!("Failed to generate DSQL auth token: {e:#?}")))?
.map_err(|e| ConnectionError::BadConnection(format!("Failed to generate DSQL auth token: {e}")))?;
psql_url.set_scheme("postgresql").expect("Failed to set 'postgresql' as scheme for DSQL connection URL");
psql_url.set_path("postgres");
psql_url.query_pairs_mut()
.append_pair("sslmode", "require")
.append_pair("user", "admin")
.append_pair("password", auth_token.as_str());
psql_url_lock_guard.replace(PsqlUrl { timestamp: now, url: psql_url.to_string() });
Ok(psql_url.to_string())
}
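For reference, a sketch of the transformation `psql_url` performs; the cluster id and region are illustrative, only the prefix of the result is asserted because the auth token itself is generated at runtime by the AWS SDK, and running this requires valid AWS credentials plus the `dsql` feature:

```rust
// Illustrative sketch, not a test shipped with this change.
fn example() -> Result<(), diesel::ConnectionError> {
    let url = psql_url("dsql://abc123.dsql.us-east-1.on.aws")?;
    // The host is preserved, the `postgres` database is targeted, and the
    // temporary admin auth token is carried as the password.
    assert!(url.starts_with(
        "postgresql://abc123.dsql.us-east-1.on.aws/postgres?sslmode=require&user=admin&password="
    ));
    Ok(())
}
```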

View file

@ -1,8 +1,11 @@
#[cfg(dsql)]
mod dsql;
use std::{sync::Arc, time::Duration};
use diesel::{
connection::SimpleConnection,
r2d2::{ConnectionManager, CustomizeConnection, Pool, PooledConnection},
r2d2::{CustomizeConnection, Pool, PooledConnection},
};
use rocket::{
@ -21,6 +24,11 @@ use crate::{
CONFIG,
};
#[cfg(dsql)]
type ConnectionManager<T> = dsql::ConnectionManager<T>;
#[cfg(not(dsql))]
type ConnectionManager<T> = diesel::r2d2::ConnectionManager<T>;
#[cfg(sqlite)]
#[path = "schemas/sqlite/schema.rs"]
pub mod __sqlite_schema;
@ -130,7 +138,7 @@ macro_rules! generate_connections {
DbConnType::$name => {
#[cfg($name)]
{
paste::paste!{ [< $name _migrations >]::run_migrations()?; }
paste::paste!{ [< $name _migrations >]::run_migrations(&url)?; }
let manager = ConnectionManager::new(&url);
let pool = Pool::builder()
.max_size(CONFIG.database_max_conns())
@ -209,6 +217,14 @@ impl DbConnType {
#[cfg(not(postgresql))]
err!("`DATABASE_URL` is a PostgreSQL URL, but the 'postgresql' feature is not enabled")
// Amazon Aurora DSQL
} else if url.starts_with("dsql:") {
#[cfg(dsql)]
return Ok(DbConnType::postgresql);
#[cfg(not(dsql))]
err!("`DATABASE_URL` is a DSQL URL, but the 'dsql' feature is not enabled")
//Sqlite
} else {
#[cfg(sqlite)]
@ -429,13 +445,12 @@ mod sqlite_migrations {
use diesel_migrations::{EmbeddedMigrations, MigrationHarness};
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/sqlite");
pub fn run_migrations() -> Result<(), super::Error> {
pub fn run_migrations(url: &str) -> Result<(), super::Error> {
use diesel::{Connection, RunQueryDsl};
let url = crate::CONFIG.database_url();
// Establish a connection to the sqlite database (this will create a new one, if it does
// not exist, and exit if there is an error).
let mut connection = diesel::sqlite::SqliteConnection::establish(&url)?;
let mut connection = diesel::sqlite::SqliteConnection::establish(url)?;
// Run the migrations after successfully establishing a connection
// Disable Foreign Key Checks during migration
@ -459,10 +474,10 @@ mod mysql_migrations {
use diesel_migrations::{EmbeddedMigrations, MigrationHarness};
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/mysql");
pub fn run_migrations() -> Result<(), super::Error> {
pub fn run_migrations(url: &str) -> Result<(), super::Error> {
use diesel::{Connection, RunQueryDsl};
// Make sure the database is up to date (create if it doesn't exist, or run the migrations)
let mut connection = diesel::mysql::MysqlConnection::establish(&crate::CONFIG.database_url())?;
let mut connection = diesel::mysql::MysqlConnection::establish(url)?;
// Disable Foreign Key Checks during migration
// Scoped to a connection/session.
@ -480,10 +495,21 @@ mod postgresql_migrations {
use diesel_migrations::{EmbeddedMigrations, MigrationHarness};
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/postgresql");
pub fn run_migrations() -> Result<(), super::Error> {
pub fn run_migrations(url: &str) -> Result<(), super::Error> {
use diesel::Connection;
#[cfg(dsql)]
if url.starts_with("dsql:") {
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/dsql");
let psql_url = crate::db::dsql::psql_url(url)?;
let mut connection = diesel::pg::PgConnection::establish(&psql_url)?;
connection.run_pending_migrations(MIGRATIONS).expect("Error running migrations");
return Ok(())
}
// Make sure the database is up to date (create if it doesn't exist, or run the migrations)
let mut connection = diesel::pg::PgConnection::establish(&crate::CONFIG.database_url())?;
let mut connection = diesel::pg::PgConnection::establish(url)?;
connection.run_pending_migrations(MIGRATIONS).expect("Error running migrations");
Ok(())
}

View file

@ -5,6 +5,7 @@ use derive_more::{AsRef, Deref, Display};
use serde_json::Value;
use super::{CipherId, OrganizationId, UserId};
use crate::persistent_fs::{download_url, remove_file};
use crate::CONFIG;
use macros::IdFromParam;
@ -44,29 +45,32 @@ impl Attachment {
format!("{}/{}/{}", CONFIG.attachments_folder(), self.cipher_uuid, self.id)
}
pub fn get_url(&self, host: &str) -> String {
let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone()));
format!("{}/attachments/{}/{}?token={}", host, self.cipher_uuid, self.id, token)
pub async fn get_url(&self, host: &str) -> Result<String, Error> {
download_url(self.get_file_path(), host)
.await
.map_err(|e| Error::new(
"Failed to generate attachment download URL",
format!("Failed to generate download URL for attachment cipher_uuid: {}, id: {}. Error: {e:?}", self.cipher_uuid, self.id)
))
}
pub fn to_json(&self, host: &str) -> Value {
json!({
pub async fn to_json(&self, host: &str) -> Result<Value, Error> {
Ok(json!({
"id": self.id,
"url": self.get_url(host),
"url": self.get_url(host).await?,
"fileName": self.file_name,
"size": self.file_size.to_string(),
"sizeName": crate::util::get_display_size(self.file_size),
"key": self.akey,
"object": "attachment"
})
}))
}
}
use crate::auth::{encode_jwt, generate_file_download_claims};
use crate::db::DbConn;
use crate::api::EmptyResult;
use crate::error::MapResult;
use crate::error::{Error, MapResult};
/// Database methods
impl Attachment {
@ -103,6 +107,19 @@ impl Attachment {
}
pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
let file_path = &self.get_file_path();
if let Err(e) = remove_file(file_path).await {
// Ignore "file not found" errors. This can happen when the
// upstream caller has already cleaned up the file as part of
// its own error handling.
if e.kind() == ErrorKind::NotFound {
debug!("File '{}' already deleted.", file_path);
} else {
return Err(e.into());
}
}
db_run! { conn: {
let _: () = crate::util::retry(
|| diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn),
@ -110,19 +127,7 @@ impl Attachment {
)
.map_res("Error deleting attachment")?;
let file_path = &self.get_file_path();
match std::fs::remove_file(file_path) {
// Ignore "file not found" errors. This can happen when the
// upstream caller has already cleaned up the file as part of
// its own error handling.
Err(e) if e.kind() == ErrorKind::NotFound => {
debug!("File '{}' already deleted.", file_path);
Ok(())
}
Err(e) => Err(e.into()),
_ => Ok(()),
}
Ok(())
}}
}

View file

@ -130,7 +130,7 @@ impl Cipher {
use crate::db::DbConn;
use crate::api::EmptyResult;
use crate::error::MapResult;
use crate::error::{Error, MapResult};
/// Database methods
impl Cipher {
@ -141,18 +141,28 @@ impl Cipher {
cipher_sync_data: Option<&CipherSyncData>,
sync_type: CipherSyncType,
conn: &mut DbConn,
) -> Value {
) -> Result<Value, Error> {
use crate::util::{format_date, validate_and_format_date};
let mut attachments_json: Value = Value::Null;
if let Some(cipher_sync_data) = cipher_sync_data {
if let Some(attachments) = cipher_sync_data.cipher_attachments.get(&self.uuid) {
attachments_json = attachments.iter().map(|c| c.to_json(host)).collect();
if !attachments.is_empty() {
let mut attachments_json_vec = vec![];
for attachment in attachments {
attachments_json_vec.push(attachment.to_json(host).await?);
}
attachments_json = Value::Array(attachments_json_vec);
}
}
} else {
let attachments = Attachment::find_by_cipher(&self.uuid, conn).await;
if !attachments.is_empty() {
attachments_json = attachments.iter().map(|c| c.to_json(host)).collect()
let mut attachments_json_vec = vec![];
for attachment in attachments {
attachments_json_vec.push(attachment.to_json(host).await?);
}
attachments_json = Value::Array(attachments_json_vec);
}
}
@ -384,7 +394,7 @@ impl Cipher {
};
json_object[key] = type_data_json;
json_object
Ok(json_object)
}
pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<UserId> {

View file

@ -1,6 +1,7 @@
use chrono::{NaiveDateTime, Utc};
use serde_json::Value;
use crate::persistent_fs::remove_dir_all;
use crate::util::LowerCase;
use super::{OrganizationId, User, UserId};
@ -226,7 +227,7 @@ impl Send {
self.update_users_revision(conn).await;
if self.atype == SendType::File as i32 {
std::fs::remove_dir_all(std::path::Path::new(&crate::CONFIG.sends_folder()).join(&self.uuid)).ok();
remove_dir_all(std::path::Path::new(&crate::CONFIG.sends_folder()).join(&self.uuid)).await.ok();
}
db_run! { conn: {

View file

@ -95,6 +95,44 @@ fn smtp_transport() -> AsyncSmtpTransport<Tokio1Executor> {
smtp_client.build()
}
#[cfg(ses)]
async fn send_with_aws_ses(email: Message) -> std::io::Result<()> {
use std::io::Error;
use aws_sdk_sesv2::{types::{EmailContent, RawMessage}, Client};
use crate::aws::aws_sdk_config;
fn sesv2_client() -> std::io::Result<Client> {
static AWS_SESV2_CLIENT: std::sync::LazyLock<std::io::Result<Client>> = std::sync::LazyLock::new(|| {
Ok(Client::new(aws_sdk_config()?))
});
(*AWS_SESV2_CLIENT)
.as_ref()
.map(|client| client.clone())
.map_err(|e| match e.get_ref() {
Some(inner) => Error::new(e.kind(), inner),
None => Error::from(e.kind()),
})
}
sesv2_client()?
.send_email()
.content(
EmailContent::builder().raw(
RawMessage::builder()
.data(email.formatted().into())
.build()
.map_err(|e| Error::other(format!("Failed to build AWS SESv2 RawMessage: {e:#?}")))?
)
.build()
)
.send()
.await
.map_err(|e| Error::other(e))?;
Ok(())
}
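Both `sesv2_client` above and the `s3_client` helper later in this commit cache a fallible one-time client initialization behind a `LazyLock` and hand out clones on each call. A standalone sketch of that caching pattern with placeholder types (nothing below is part of this commit):

```rust
use std::io::Error;
use std::sync::LazyLock;

#[derive(Clone)]
struct Client; // placeholder for an AWS SDK client

fn build_client() -> std::io::Result<Client> {
    Ok(Client)
}

fn cached_client() -> std::io::Result<Client> {
    // Initialization runs exactly once; later calls either clone the cached
    // client or rebuild an owned io::Error from the cached failure.
    static CLIENT: LazyLock<std::io::Result<Client>> = LazyLock::new(build_client);
    (*CLIENT)
        .as_ref()
        .map(|client| client.clone())
        .map_err(|e| match e.get_ref() {
            Some(inner) => Error::new(e.kind(), inner.to_string()),
            None => Error::from(e.kind()),
        })
}

fn main() {
    assert!(cached_client().is_ok());
}
```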
// This will sanitize the string values by stripping all the html tags to prevent XSS and HTML Injections
fn sanitize_data(data: &mut serde_json::Value) {
use regex::Regex;
@ -626,6 +664,15 @@ async fn send_with_selected_transport(email: Message) -> EmptyResult {
}
}
}
} else if CONFIG.use_aws_ses() {
#[cfg(ses)]
match send_with_aws_ses(email).await {
Ok(_) => Ok(()),
Err(e) => err!("Failed to send email", format!("Failed to send email using AWS SES: {e:?}"))
}
#[cfg(not(ses))]
err!("Failed to send email", "Failed to send email using AWS SES: `ses` feature is not enabled");
} else {
match smtp_transport().send(email).await {
Ok(_) => Ok(()),

View file

@ -29,7 +29,7 @@ extern crate diesel_derive_newtype;
use std::{
collections::HashMap,
fs::{canonicalize, create_dir_all},
fs::canonicalize,
panic,
path::Path,
process::exit,
@ -45,6 +45,9 @@ use tokio::{
#[cfg(unix)]
use tokio::signal::unix::SignalKind;
#[cfg(any(dsql, s3, ses))]
mod aws;
#[macro_use]
mod error;
mod api;
@ -53,6 +56,7 @@ mod config;
mod crypto;
#[macro_use]
mod db;
mod persistent_fs;
mod http_client;
mod mail;
mod ratelimit;
@ -61,6 +65,7 @@ mod util;
use crate::api::core::two_factor::duo_oidc::purge_duo_contexts;
use crate::api::purge_auth_requests;
use crate::api::{WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS};
use crate::persistent_fs::{create_dir_all, path_exists, path_is_dir};
pub use config::CONFIG;
pub use error::{Error, MapResult};
use rocket::data::{Limits, ToByteUnit};
@ -75,16 +80,16 @@ async fn main() -> Result<(), Error> {
let level = init_logging()?;
check_data_folder().await;
auth::initialize_keys().unwrap_or_else(|e| {
auth::initialize_keys().await.unwrap_or_else(|e| {
error!("Error creating private key '{}'\n{e:?}\nExiting Vaultwarden!", CONFIG.private_rsa_key());
exit(1);
});
check_web_vault();
create_dir(&CONFIG.icon_cache_folder(), "icon cache");
create_dir(&CONFIG.tmp_folder(), "tmp folder");
create_dir(&CONFIG.sends_folder(), "sends folder");
create_dir(&CONFIG.attachments_folder(), "attachments folder");
create_dir(&CONFIG.icon_cache_folder(), "icon cache").await;
create_dir(&CONFIG.tmp_folder(), "tmp folder").await;
create_dir(&CONFIG.sends_folder(), "sends folder").await;
create_dir(&CONFIG.attachments_folder(), "attachments folder").await;
let pool = create_db_pool().await;
schedule_jobs(pool.clone());
@ -459,16 +464,16 @@ fn chain_syslog(logger: fern::Dispatch) -> fern::Dispatch {
}
}
fn create_dir(path: &str, description: &str) {
async fn create_dir(path: &str, description: &str) {
// Try to create the specified dir, if it doesn't already exist.
let err_msg = format!("Error creating {description} directory '{path}'");
create_dir_all(path).expect(&err_msg);
create_dir_all(path).await.expect(&err_msg);
}
async fn check_data_folder() {
let data_folder = &CONFIG.data_folder();
let path = Path::new(data_folder);
if !path.exists() {
if !path_exists(path).await.unwrap_or(false) {
error!("Data folder '{}' doesn't exist.", data_folder);
if is_running_in_container() {
error!("Verify that your data volume is mounted at the correct location.");
@ -477,7 +482,7 @@ async fn check_data_folder() {
}
exit(1);
}
if !path.is_dir() {
if !path_is_dir(path).await.unwrap_or(false) {
error!("Data folder '{}' is not a directory.", data_folder);
exit(1);
}

141
src/persistent_fs/local.rs Normal file
View file

@ -0,0 +1,141 @@
use std::{io::{Error, ErrorKind}, path::{Path, PathBuf}, time::SystemTime};
use rocket::fs::TempFile;
use tokio::{fs::{File, OpenOptions}, io::{AsyncReadExt, AsyncWriteExt}};
use super::PersistentFSBackend;
pub(crate) struct LocalFSBackend(String);
impl AsRef<Path> for LocalFSBackend {
fn as_ref(&self) -> &Path {
self.0.as_ref()
}
}
impl PersistentFSBackend for LocalFSBackend {
fn new<P: AsRef<Path>>(path: P) -> std::io::Result<Self> {
Ok(Self(path
.as_ref()
.to_str()
.ok_or_else(||
Error::new(
ErrorKind::InvalidInput,
format!("Data folder path {:?} is not valid UTF-8", path.as_ref())
)
)?
.to_string()
))
}
async fn read(self) -> std::io::Result<Vec<u8>> {
let mut file = File::open(self).await?;
let mut buffer = Vec::new();
file.read_to_end(&mut buffer).await?;
Ok(buffer)
}
async fn write(self, buf: &[u8]) -> std::io::Result<()> {
let mut file = OpenOptions::new().create(true).truncate(true).write(true).open(self).await?;
file.write_all(buf).await?;
Ok(())
}
async fn path_exists(self) -> std::io::Result<bool> {
match tokio::fs::metadata(self).await {
Ok(_) => Ok(true),
Err(e) => match e.kind() {
ErrorKind::NotFound => Ok(false),
_ => Err(e),
},
}
}
async fn file_exists(self) -> std::io::Result<bool> {
match tokio::fs::metadata(self).await {
Ok(metadata) => Ok(metadata.is_file()),
Err(e) => match e.kind() {
ErrorKind::NotFound => Ok(false),
_ => Err(e),
},
}
}
async fn path_is_dir(self) -> std::io::Result<bool> {
match tokio::fs::metadata(self).await {
Ok(metadata) => Ok(metadata.is_dir()),
Err(e) => match e.kind() {
ErrorKind::NotFound => Ok(false),
_ => Err(e),
},
}
}
async fn canonicalize(self) -> std::io::Result<PathBuf> {
tokio::fs::canonicalize(self).await
}
async fn create_dir_all(self) -> std::io::Result<()> {
tokio::fs::create_dir_all(self).await
}
async fn persist_temp_file(self, mut temp_file: TempFile<'_>) -> std::io::Result<()> {
if temp_file.persist_to(&self).await.is_err() {
temp_file.move_copy_to(self).await?;
}
Ok(())
}
async fn remove_file(self) -> std::io::Result<()> {
tokio::fs::remove_file(self).await
}
async fn remove_dir_all(self) -> std::io::Result<()> {
tokio::fs::remove_dir_all(self).await
}
async fn last_modified(self) -> std::io::Result<SystemTime> {
tokio::fs::symlink_metadata(self)
.await?
.modified()
}
async fn download_url(self, local_host: &str) -> std::io::Result<String> {
use std::sync::LazyLock;
use crate::{
auth::{encode_jwt, generate_file_download_claims, generate_send_claims},
db::models::{AttachmentId, CipherId, SendId, SendFileId},
CONFIG
};
let LocalFSBackend(path) = self;
static ATTACHMENTS_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{}/", CONFIG.attachments_folder()));
static SENDS_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{}/", CONFIG.sends_folder()));
if path.starts_with(&*ATTACHMENTS_PREFIX) {
let attachment_parts = path.trim_start_matches(&*ATTACHMENTS_PREFIX).split('/').collect::<Vec<&str>>();
let [cipher_uuid, attachment_id] = attachment_parts[..] else {
return Err(Error::new(ErrorKind::InvalidInput, format!("Attachment path {path:?} does not match a known download URL path pattern")));
};
let token = encode_jwt(&generate_file_download_claims(CipherId::from(cipher_uuid.to_string()), AttachmentId(attachment_id.to_string())));
Ok(format!("{}/attachments/{}/{}?token={}", local_host, cipher_uuid, attachment_id, token))
} else if path.starts_with(&*SENDS_PREFIX) {
let send_parts = path.trim_start_matches(&*SENDS_PREFIX).split('/').collect::<Vec<&str>>();
let [send_id, file_id] = send_parts[..] else {
return Err(Error::new(ErrorKind::InvalidInput, format!("Send path {path:?} does not match a known download URL path pattern")));
};
let token = encode_jwt(&generate_send_claims(&SendId::from(send_id.to_string()), &SendFileId::from(file_id.to_string())));
Ok(format!("{}/api/sends/{}/{}?t={}", local_host, send_id, file_id, token))
} else {
Err(Error::new(ErrorKind::InvalidInput, format!("Data folder path {path:?} does not match a known download URL path pattern")))
}
}
}

316
src/persistent_fs/mod.rs Normal file
View file

@ -0,0 +1,316 @@
mod local;
#[cfg(s3)]
mod s3;
use std::{io::{Error, ErrorKind}, path::{Path, PathBuf}, time::SystemTime};
use rocket::fs::TempFile;
enum FSType {
Local(local::LocalFSBackend),
#[cfg(s3)]
S3(s3::S3FSBackend),
}
pub(crate) trait PersistentFSBackend: Sized {
fn new<P: AsRef<Path>>(path: P) -> std::io::Result<Self>;
async fn read(self) -> std::io::Result<Vec<u8>>;
async fn write(self, buf: &[u8]) -> std::io::Result<()>;
async fn path_exists(self) -> std::io::Result<bool>;
async fn file_exists(self) -> std::io::Result<bool>;
async fn path_is_dir(self) -> std::io::Result<bool>;
async fn canonicalize(self) -> std::io::Result<PathBuf>;
async fn create_dir_all(self) -> std::io::Result<()>;
async fn persist_temp_file(self, temp_file: TempFile<'_>) -> std::io::Result<()>;
async fn remove_file(self) -> std::io::Result<()>;
async fn remove_dir_all(self) -> std::io::Result<()>;
async fn last_modified(self) -> std::io::Result<SystemTime>;
async fn download_url(self, local_host: &str) -> std::io::Result<String>;
}
impl PersistentFSBackend for FSType {
fn new<P: AsRef<Path>>(path: P) -> std::io::Result<Self> {
#[cfg(s3)]
if path.as_ref().starts_with("s3://") {
return Ok(FSType::S3(s3::S3FSBackend::new(path)?));
}
Ok(FSType::Local(local::LocalFSBackend::new(path)?))
}
async fn read(self) -> std::io::Result<Vec<u8>> {
match self {
FSType::Local(fs) => fs.read().await,
#[cfg(s3)]
FSType::S3(fs) => fs.read().await,
}
}
async fn write(self, buf: &[u8]) -> std::io::Result<()> {
match self {
FSType::Local(fs) => fs.write(buf).await,
#[cfg(s3)]
FSType::S3(fs) => fs.write(buf).await,
}
}
async fn path_exists(self) -> std::io::Result<bool> {
match self {
FSType::Local(fs) => fs.path_exists().await,
#[cfg(s3)]
FSType::S3(fs) => fs.path_exists().await,
}
}
async fn file_exists(self) -> std::io::Result<bool> {
match self {
FSType::Local(fs) => fs.file_exists().await,
#[cfg(s3)]
FSType::S3(fs) => fs.file_exists().await,
}
}
async fn path_is_dir(self) -> std::io::Result<bool> {
match self {
FSType::Local(fs) => fs.path_is_dir().await,
#[cfg(s3)]
FSType::S3(fs) => fs.path_is_dir().await,
}
}
async fn canonicalize(self) -> std::io::Result<PathBuf> {
match self {
FSType::Local(fs) => fs.canonicalize().await,
#[cfg(s3)]
FSType::S3(fs) => fs.canonicalize().await,
}
}
async fn create_dir_all(self) -> std::io::Result<()> {
match self {
FSType::Local(fs) => fs.create_dir_all().await,
#[cfg(s3)]
FSType::S3(fs) => fs.create_dir_all().await,
}
}
async fn persist_temp_file(self, temp_file: TempFile<'_>) -> std::io::Result<()> {
match self {
FSType::Local(fs) => fs.persist_temp_file(temp_file).await,
#[cfg(s3)]
FSType::S3(fs) => fs.persist_temp_file(temp_file).await,
}
}
async fn remove_file(self) -> std::io::Result<()> {
match self {
FSType::Local(fs) => fs.remove_file().await,
#[cfg(s3)]
FSType::S3(fs) => fs.remove_file().await,
}
}
async fn remove_dir_all(self) -> std::io::Result<()> {
match self {
FSType::Local(fs) => fs.remove_dir_all().await,
#[cfg(s3)]
FSType::S3(fs) => fs.remove_dir_all().await,
}
}
async fn last_modified(self) -> std::io::Result<SystemTime> {
match self {
FSType::Local(fs) => fs.last_modified().await,
#[cfg(s3)]
FSType::S3(fs) => fs.last_modified().await,
}
}
async fn download_url(self, local_host: &str) -> std::io::Result<String> {
match self {
FSType::Local(fs) => fs.download_url(local_host).await,
#[cfg(s3)]
FSType::S3(fs) => fs.download_url(local_host).await,
}
}
}
/// Reads the contents of a file at the given path.
///
/// # Arguments
///
/// * `path` - A reference to the path of the file to read.
///
/// # Returns
///
/// * `std::io::Result<Vec<u8>>` - A result containing a vector of bytes with the
/// file contents if successful, or an I/O error.
pub(crate) async fn read<P: AsRef<Path>>(path: P) -> std::io::Result<Vec<u8>> {
FSType::new(path)?.read().await
}
/// Writes data to a file at the given path.
///
/// If the file does not exist, it will be created. If it does exist, it will be
/// overwritten.
///
/// # Arguments
///
/// * `path` - A reference to the path of the file to write.
/// * `buf` - A byte slice containing the data to write to the file.
///
/// # Returns
///
/// * `std::io::Result<()>` - A result indicating success or an I/O error.
pub(crate) async fn write<P: AsRef<Path>>(path: P, buf: &[u8]) -> std::io::Result<()> {
FSType::new(path)?.write(buf).await
}
/// Checks whether a path exists.
///
/// This function returns `true` in all cases where the path exists, including
/// as a file, directory, or symlink.
///
/// # Arguments
///
/// * `path` - A reference to the path to check.
///
/// # Returns
///
/// * `std::io::Result<bool>` - A result containing a boolean value indicating
/// whether the path exists.
pub(crate) async fn path_exists<P: AsRef<Path>>(path: P) -> std::io::Result<bool> {
FSType::new(path)?.path_exists().await
}
/// Checks whether a regular file exists at the given path.
///
/// Symlinks are followed, so a symlink that resolves to a regular file also counts.
///
/// # Arguments
///
/// * `path` - A reference to the path to check.
///
/// # Returns
///
/// * `std::io::Result<bool>` - A result containing a boolean value indicating
/// whether a regular file exists at the given path.
pub(crate) async fn file_exists<P: AsRef<Path>>(path: P) -> std::io::Result<bool> {
FSType::new(path)?.file_exists().await
}
/// Checks whether a directory exists at the given path.
///
/// Symlinks are followed, so a symlink that resolves to a directory also counts.
///
/// # Arguments
///
/// * `path` - A reference to the path to check.
///
/// # Returns
///
/// * `std::io::Result<bool>` - A result containing a boolean value indicating
/// whether a directory exists at the given path.
pub(crate) async fn path_is_dir<P: AsRef<Path>>(path: P) -> std::io::Result<bool> {
FSType::new(path)?.path_is_dir().await
}
/// Canonicalizes the given path.
///
/// This function resolves the given path to an absolute path, eliminating any
/// symbolic links and relative path components.
///
/// # Arguments
///
/// * `path` - A reference to the path to canonicalize.
///
/// # Returns
///
/// * `std::io::Result<PathBuf>` - A result containing the canonicalized path if successful,
/// or an I/O error.
pub(crate) async fn canonicalize<P: AsRef<Path>>(path: P) -> std::io::Result<PathBuf> {
FSType::new(path)?.canonicalize().await
}
/// Creates a directory and all its parent components as needed.
///
/// # Arguments
///
/// * `path` - A reference to the path of the directory to create.
///
/// # Returns
///
/// * `std::io::Result<()>` - A result indicating success or an I/O error.
pub(crate) async fn create_dir_all<P: AsRef<Path>>(path: P) -> std::io::Result<()> {
FSType::new(path)?.create_dir_all().await
}
/// Persists a temporary file to a permanent location.
///
/// # Arguments
///
/// * `temp_file` - The temporary file to persist.
/// * `path` - A reference to the path where the file should be persisted.
///
/// # Returns
///
/// * `std::io::Result<()>` - A result indicating success or an I/O error.
pub(crate) async fn persist_temp_file<P: AsRef<Path>>(temp_file: TempFile<'_>, path: P) -> std::io::Result<()> {
FSType::new(path)?.persist_temp_file(temp_file).await
}
/// Removes a file at the given path.
///
/// # Arguments
///
/// * `path` - A reference to the path of the file to remove.
///
/// # Returns
///
/// * `std::io::Result<()>` - A result indicating success or an I/O error.
pub(crate) async fn remove_file<P: AsRef<Path>>(path: P) -> std::io::Result<()> {
FSType::new(path)?.remove_file().await
}
/// Removes a directory and all its contents at the given path.
///
/// # Arguments
///
/// * `path` - A reference to the path of the directory to remove.
///
/// # Returns
///
/// * `std::io::Result<()>` - A result indicating success or an I/O error.
pub(crate) async fn remove_dir_all<P: AsRef<Path>>(path: P) -> std::io::Result<()> {
FSType::new(path)?.remove_dir_all().await
}
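/// Checks whether a file's last-modified time is at least `ttl` seconds in the past.
///
/// # Arguments
///
/// * `path` - A reference to the path of the file to check.
/// * `ttl` - Maximum allowed age in seconds. A value of `0` means the file never expires.
///
/// # Returns
///
/// * `Result<bool, Error>` - A result containing a boolean value indicating
/// whether the file is expired, or an I/O error.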
pub(crate) async fn file_is_expired<P: AsRef<Path>>(path: P, ttl: u64) -> Result<bool, Error> {
let path = path.as_ref();
let modified = FSType::new(path)?.last_modified().await?;
let age = SystemTime::now().duration_since(modified)
.map_err(|e| Error::new(
ErrorKind::InvalidData,
format!("Failed to determine file age for {path:?} from last modified timestamp '{modified:#?}': {e:?}")
))?;
Ok(ttl > 0 && ttl <= age.as_secs())
}
/// Generates a URL to download attachment and Send files. The local backend
/// returns a tokenized Vaultwarden URL; the S3 backend returns a pre-signed URL.
///
/// # Arguments
///
/// * `path` - A reference to the path of the file to read.
/// * `local_host` - This API server host.
///
/// # Returns
///
/// * `std::io::Result<String>` - A result containing the url if successful, or an I/O error.
pub(crate) async fn download_url<P: AsRef<Path>>(path: P, local_host: &str) -> std::io::Result<String> {
FSType::new(path)?.download_url(local_host).await
}
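A minimal usage sketch of these module-level helpers; the path, host, and payload are illustrative, and which backend handles a call is decided solely by the path prefix (`s3://` routes to S3 when the `s3` feature is enabled, anything else stays local):

```rust
async fn example() -> std::io::Result<()> {
    // With DATA_FOLDER=data this uses the local backend; with
    // DATA_FOLDER=s3://my-bucket/vaultwarden the same calls would go to S3.
    let path = "data/attachments/cipher-uuid/attachment-id";

    write(path, b"ciphertext bytes").await?;
    assert_eq!(read(path).await?, b"ciphertext bytes".to_vec());
    assert!(file_exists(path).await?);

    // The local backend returns a tokenized Vaultwarden URL; the S3 backend
    // returns a pre-signed GetObject URL that expires after a few minutes.
    let _url = download_url(path, "https://vault.example.com").await?;

    remove_file(path).await?;
    Ok(())
}
```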

316
src/persistent_fs/s3.rs Normal file
View file

@ -0,0 +1,316 @@
use std::{io::{Error, ErrorKind}, path::{Path, PathBuf}, time::SystemTime};
use aws_sdk_s3::{client::Client, primitives::ByteStream, types::StorageClass::IntelligentTiering};
use rocket::{fs::TempFile, http::ContentType};
use tokio::{fs::File, io::AsyncReadExt};
use url::Url;
use crate::aws::aws_sdk_config;
use super::PersistentFSBackend;
pub(crate) struct S3FSBackend {
path: PathBuf,
bucket: String,
key: String,
}
fn s3_client() -> std::io::Result<Client> {
static AWS_S3_CLIENT: std::sync::LazyLock<std::io::Result<Client>> = std::sync::LazyLock::new(|| {
Ok(Client::new(aws_sdk_config()?))
});
(*AWS_S3_CLIENT)
.as_ref()
.map(|client| client.clone())
.map_err(|e| match e.get_ref() {
Some(inner) => Error::new(e.kind(), inner),
None => Error::from(e.kind()),
})
}
impl PersistentFSBackend for S3FSBackend {
fn new<P: AsRef<Path>>(path: P) -> std::io::Result<Self> {
let path = path.as_ref();
let url = Url::parse(path.to_str().ok_or_else(|| Error::new(ErrorKind::InvalidInput, "Invalid path"))?)
.map_err(|e| Error::new(ErrorKind::InvalidInput, format!("Invalid data folder S3 URL {path:?}: {e}")))?;
let bucket = url.host_str()
.ok_or_else(|| Error::new(ErrorKind::InvalidInput, format!("Missing Bucket name in data folder S3 URL {path:?}")))?
.to_string();
let key = url.path().trim_start_matches('/').to_string();
Ok(S3FSBackend {
path: path.to_path_buf(),
bucket,
key,
})
}
async fn read(self) -> std::io::Result<Vec<u8>> {
let S3FSBackend { path, key, bucket } = self;
let result = s3_client()?
.get_object()
.bucket(bucket)
.key(key)
.send()
.await;
match result {
Ok(response) => {
let mut buffer = Vec::new();
response.body.into_async_read().read_to_end(&mut buffer).await?;
Ok(buffer)
}
Err(e) => {
if let Some(service_error) = e.as_service_error() {
if service_error.is_no_such_key() {
Err(Error::new(ErrorKind::NotFound, format!("Data folder S3 object {path:?} not found")))
} else {
Err(Error::other(format!("Failed to request data folder S3 object {path:?}: {e:?}")))
}
} else {
Err(Error::other(format!("Failed to request data folder S3 object {path:?}: {e:?}")))
}
}
}
}
async fn write(self, buf: &[u8]) -> std::io::Result<()> {
let S3FSBackend { path, key, bucket } = self;
let content_type = Path::new(&key)
.extension()
.and_then(|ext| ext.to_str())
.and_then(|ext| ContentType::from_extension(ext))
.and_then(|t| Some(t.to_string()));
s3_client()?
.put_object()
.bucket(bucket)
.set_content_type(content_type)
.key(key)
.storage_class(IntelligentTiering)
.body(ByteStream::from(buf.to_vec()))
.send()
.await
.map_err(|e| Error::other(format!("Failed to write to data folder S3 object {path:?}: {e:?}")))?;
Ok(())
}
async fn path_exists(self) -> std::io::Result<bool> {
Ok(true)
}
async fn file_exists(self) -> std::io::Result<bool> {
let S3FSBackend { path, key, bucket } = self;
match s3_client()?
.head_object()
.bucket(bucket)
.key(key)
.send()
.await {
Ok(_) => Ok(true),
Err(e) => {
if let Some(service_error) = e.as_service_error() {
if service_error.is_not_found() {
Ok(false)
} else {
Err(Error::other(format!("Failed to request data folder S3 object {path:?}: {e:?}")))
}
} else {
Err(Error::other(format!("Failed to request data folder S3 object {path:?}: {e:?}")))
}
}
}
}
async fn path_is_dir(self) -> std::io::Result<bool> {
Ok(true)
}
async fn canonicalize(self) -> std::io::Result<PathBuf> {
Ok(self.path)
}
async fn create_dir_all(self) -> std::io::Result<()> {
Ok(())
}
async fn persist_temp_file(self, temp_file: TempFile<'_>) -> std::io::Result<()> {
let S3FSBackend { path, key, bucket } = self;
// We want to stream the TempFile directly to S3 without copying it into
// another memory buffer. The official AWS SDK makes it easy to stream
// from a `tokio::fs::File`, but does not have a reasonable way to stream
// from an `impl AsyncBufRead`.
//
// A TempFile's contents may be saved in memory or on disk. We use the
// SDK to stream the file if we can access it on disk, otherwise we fall
// back to a second copy in memory.
let file = match temp_file.path() {
Some(path) => File::open(path).await.ok(),
None => None,
};
let byte_stream = match file {
Some(file) => ByteStream::read_from().file(file).build().await.ok(),
None => None,
};
let byte_stream = match byte_stream {
Some(byte_stream) => byte_stream,
None => {
// TODO: Implement a mechanism to stream the file directly to S3
// without buffering it again in memory. This would require
// chunking it into a multi-part upload. See example here:
// https://imfeld.dev/writing/rust_s3_streaming_upload
let mut read_stream = temp_file.open().await?;
let mut buf = Vec::with_capacity(temp_file.len() as usize);
read_stream.read_to_end(&mut buf).await?;
ByteStream::from(buf)
}
};
let content_type = temp_file
.content_type()
.map(|t| t.to_string())
.or_else(||
temp_file.name()
.and_then(|name| Path::new(name).extension())
.and_then(|ext| ext.to_str())
.and_then(|ext| ContentType::from_extension(ext))
.and_then(|t| Some(t.to_string()))
);
s3_client()?
.put_object()
.bucket(bucket)
.key(key)
.storage_class(IntelligentTiering)
.set_content_type(content_type)
.body(byte_stream)
.send()
.await
.map_err(|e| Error::other(format!("Failed to write to data folder S3 object {path:?}: {e:?}")))?;
Ok(())
}
async fn remove_file(self) -> std::io::Result<()> {
let S3FSBackend { path, key, bucket } = self;
s3_client()?
.delete_object()
.bucket(bucket)
.key(key)
.send()
.await
.map_err(|e| Error::other(format!("Failed to delete data folder S3 object {path:?}: {e:?}")))?;
Ok(())
}
async fn remove_dir_all(self) -> std::io::Result<()> {
use aws_sdk_s3::types::{Delete, ObjectIdentifier};
let S3FSBackend { path, key: prefix, bucket } = self;
let s3_client = s3_client()?;
let mut list_response = s3_client
.list_objects_v2()
.bucket(bucket.clone())
.prefix(format!("{prefix}/"))
.into_paginator()
.send();
while let Some(list_result) = list_response.next().await {
let list_result = list_result
.map_err(|e| Error::other(format!("Failed to list data folder S3 objects with prefix {path:?}/ intended for deletion: {e:?}")))?;
let objects = list_result
.contents
.ok_or_else(|| Error::other(format!("Failed to list data folder S3 objects with prefix {path:?}/ intended for deletion: Missing contents")))?;
let keys = objects.into_iter()
.map(|object| object.key
.ok_or_else(|| Error::other(format!("Failed to list data folder S3 objects with prefix {path:?}/ intended for deletion: An object is missing its key")))
)
.collect::<std::io::Result<Vec<_>>>()?;
let mut delete = Delete::builder().quiet(true);
for key in keys {
delete = delete.objects(
ObjectIdentifier::builder()
.key(key)
.build()
.map_err(|e| Error::other(format!("Failed to delete data folder S3 objects with prefix {path:?}/: {e:?}")))?
);
}
let delete = delete
.build()
.map_err(|e| Error::other(format!("Failed to delete data folder S3 objects with prefix {path:?}/: {e:?}")))?;
s3_client
.delete_objects()
.bucket(bucket.clone())
.delete(delete)
.send()
.await
.map_err(|e| Error::other(format!("Failed to delete data folder S3 objects with prefix {path:?}/: {e:?}")))?;
}
Ok(())
}
async fn last_modified(self) -> std::io::Result<SystemTime> {
let S3FSBackend { path, key, bucket } = self;
let response = s3_client()?
.head_object()
.bucket(bucket)
.key(key)
.send()
.await
.map_err(|e| match e.as_service_error() {
Some(service_error) if service_error.is_not_found() => Error::new(ErrorKind::NotFound, format!("Failed to get metadata for data folder S3 object {path:?}: Object does not exist")),
Some(service_error) => Error::other(format!("Failed to get metadata for data folder S3 object {path:?}: {service_error:?}")),
None => Error::other(format!("Failed to get metadata for data folder S3 object {path:?}: {e:?}")),
})?;
let last_modified = response.last_modified
.ok_or_else(|| Error::new(ErrorKind::NotFound, format!("Failed to get metadata for data folder S3 object {path:?}: Missing last modified data")))?;
SystemTime::try_from(last_modified)
.map_err(|e| Error::new(ErrorKind::InvalidData, format!("Failed to parse last modified date for data folder S3 object {path:?}: {e:?}")))
}
async fn download_url(self, _local_host: &str) -> std::io::Result<String> {
use std::time::Duration;
use aws_sdk_s3::presigning::PresigningConfig;
let S3FSBackend { path, key, bucket } = self;
s3_client()?
.get_object()
.bucket(bucket)
.key(key)
.presigned(
PresigningConfig::expires_in(Duration::from_secs(5 * 60))
.map_err(|e| Error::other(
format!("Failed to generate presigned config for GetObject URL for data folder S3 object {path:?}: {e:?}")
))?
)
.await
.map(|presigned| presigned.uri().to_string())
.map_err(|e| Error::other(format!("Failed to generate presigned URL for GetObject for data folder S3 object {path:?}: {e:?}")))
}
}
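The bucket/key split performed by `S3FSBackend::new` can be shown standalone; the bucket name and prefix below are examples, not defaults introduced by this commit:

```rust
use url::Url;

fn main() {
    // DATA_FOLDER=s3://my-bucket/vaultwarden turns every data path into an S3 URL.
    let path = "s3://my-bucket/vaultwarden/attachments/cipher-uuid/attachment-id";
    let url = Url::parse(path).expect("valid S3 URL");

    // The host component becomes the bucket, the remaining path the object key.
    let bucket = url.host_str().expect("bucket name");
    let key = url.path().trim_start_matches('/');

    assert_eq!(bucket, "my-bucket");
    assert_eq!(key, "vaultwarden/attachments/cipher-uuid/attachment-id");
}
```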

View file

@ -82,6 +82,12 @@ impl Fairing for AppHeaders {
// 2FA/MFA Site check: api.2fa.directory
// # Mail Relay: https://bitwarden.com/blog/add-privacy-and-security-using-email-aliases-with-bitwarden/
// app.simplelogin.io, app.addy.io, api.fastmail.com, quack.duckduckgo.com
#[cfg(s3)]
let s3_connect_src = "https://*.amazonaws.com";
#[cfg(not(s3))]
let s3_connect_src = "";
let csp = format!(
"default-src 'none'; \
font-src 'self'; \
@ -108,6 +114,7 @@ impl Fairing for AppHeaders {
https://app.addy.io/api/ \
https://api.fastmail.com/ \
https://api.forwardemail.net \
{s3_connect_src} \
{allowed_connect_src};\
",
icon_service_csp = CONFIG._icon_service_csp(),