diff --git a/README.md b/README.md
index 6254761..016c5bd 100644
--- a/README.md
+++ b/README.md
@@ -98,10 +98,12 @@ enabled using the `-cache` flag. It supports the following values:
    available memory and is not recommended for production systems)
  - directory on local disk (e.g. `/tmp/imageproxy`) - will cache images
    on disk
- - s3 URL (e.g. `s3://s3-us-west-2.amazonaws.com/my-bucket`) - will cache
+ - s3 URL (e.g. `s3://region/bucket-name/optional-path-prefix`) - will cache
    images on Amazon S3. This requires either an IAM role and instance
    profile with access to your your bucket or `AWS_ACCESS_KEY_ID` and `AWS_SECRET_KEY`
-   environmental variables be set.
+   environmental variables be set. (Additional methods of loading credentials
+   are documented in the [aws-sdk-go session
+   package](https://docs.aws.amazon.com/sdk-for-go/api/aws/session/)).
  - gcs URL (e.g. `gcs://bucket-name/optional-path-prefix`) - will cache images
    on Google Cloud Storage. This requires `GCP_PRIVATE_KEY` environmental
    variable be set.
diff --git a/cmd/imageproxy/main.go b/cmd/imageproxy/main.go
index a18e5c8..70b54cf 100644
--- a/cmd/imageproxy/main.go
+++ b/cmd/imageproxy/main.go
@@ -32,8 +32,8 @@ import (
 	"github.com/gregjones/httpcache/diskcache"
 	rediscache "github.com/gregjones/httpcache/redis"
 	"github.com/peterbourgon/diskv"
-	"sourcegraph.com/sourcegraph/s3cache"
 	"willnorris.com/go/imageproxy"
+	"willnorris.com/go/imageproxy/internal/s3cache"
 )
 
 var addr = flag.String("addr", "localhost:8080", "TCP address to listen on")
@@ -110,8 +110,7 @@ func parseCache() (imageproxy.Cache, error) {
 
 	switch u.Scheme {
 	case "s3":
-		u.Scheme = "https"
-		return s3cache.New(u.String()), nil
+		return s3cache.New(u.String())
 	case "gcs":
 		return gcscache.New(u.String()), nil
 	case "azure":
diff --git a/internal/s3cache/s3cache.go b/internal/s3cache/s3cache.go
new file mode 100644
index 0000000..c3bbbfd
--- /dev/null
+++ b/internal/s3cache/s3cache.go
@@ -0,0 +1,110 @@
+// Package s3cache provides an httpcache.Cache implementation that stores
+// cached values on Amazon S3.
+package s3cache
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/hex"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/url"
+	"path"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+)
+
+type cache struct {
+	*s3.S3
+	bucket, prefix string
+}
+
+func (c *cache) Get(key string) ([]byte, bool) {
+	key = path.Join(c.prefix, keyToFilename(key))
+	input := &s3.GetObjectInput{
+		Bucket: &c.bucket,
+		Key:    &key,
+	}
+
+	resp, err := c.GetObject(input)
+	if err != nil {
+		if aerr, ok := err.(awserr.Error); ok && aerr.Code() != "NoSuchKey" {
+			log.Printf("error fetching from s3: %v", aerr)
+		}
+		return nil, false
+	}
+
+	value, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		log.Printf("error reading s3 response body: %v", err)
+		return nil, false
+	}
+
+	return value, true
+}
+func (c *cache) Set(key string, value []byte) {
+	key = path.Join(c.prefix, keyToFilename(key))
+	input := &s3.PutObjectInput{
+		Body:   aws.ReadSeekCloser(bytes.NewReader(value)),
+		Bucket: &c.bucket,
+		Key:    &key,
+	}
+
+	_, err := c.PutObject(input)
+	if err != nil {
+		log.Printf("error writing to s3: %v", err)
+	}
+}
+func (c *cache) Delete(key string) {
+	key = path.Join(c.prefix, keyToFilename(key))
+	input := &s3.DeleteObjectInput{
+		Bucket: &c.bucket,
+		Key:    &key,
+	}
+
+	_, err := c.DeleteObject(input)
+	if err != nil {
+		log.Printf("error deleting from s3: %v", err)
+	}
+}
+
+func keyToFilename(key string) string {
+	h := md5.New()
+	io.WriteString(h, key)
+	return hex.EncodeToString(h.Sum(nil))
+}
+
+// New constructs a cache configured using the provided URL string. URL should
+// be of the form: "s3://region/bucket/optional-path-prefix". Credentials
+// should be specified using one of the mechanisms supported by aws-sdk-go (see
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/session/).
+func New(s string) (*cache, error) {
+	u, err := url.Parse(s)
+	if err != nil {
+		return nil, err
+	}
+
+	region := u.Host
+	path := strings.SplitN(strings.TrimPrefix(u.Path, "/"), "/", 2)
+	bucket := path[0]
+	var prefix string
+	if len(path) > 1 {
+		prefix = path[1]
+	}
+
+	sess, err := session.NewSession(&aws.Config{Region: &region})
+	if err != nil {
+		return nil, err
+	}
+
+	return &cache{
+		S3:     s3.New(sess),
+		bucket: bucket,
+		prefix: prefix,
+	}, nil
+}
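
For context, a minimal sketch of how the new constructor is exercised end to end. The region, bucket, and key prefix below are hypothetical, and because the package lives under internal/ it can only be imported from inside this module (which is why cmd/imageproxy above is the caller); credentials are resolved by the aws-sdk-go session from environment variables, shared config, or an instance profile.

package main

import (
	"log"
	"net/http"

	"willnorris.com/go/imageproxy"
	"willnorris.com/go/imageproxy/internal/s3cache"
)

func main() {
	// Hypothetical cache URL: region "us-west-2", bucket "my-bucket", and
	// key prefix "imageproxy". Credentials are picked up by aws-sdk-go
	// (environment variables, shared config, or an EC2 instance profile).
	cache, err := s3cache.New("s3://us-west-2/my-bucket/imageproxy")
	if err != nil {
		log.Fatalf("error creating s3 cache: %v", err)
	}

	// Proxy implements http.Handler, so it can be served directly.
	p := imageproxy.NewProxy(http.DefaultTransport, cache)
	log.Fatal(http.ListenAndServe("localhost:8080", p))
}

Operationally, the user-visible change is the -cache flag's URL form: s3://region/bucket-name/optional-path-prefix rather than the old endpoint-style s3 URL.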