
switch to official AWS Go SDK

This uses the official Go SDK from Amazon, which supports the newer v4
authentication method.  Fixes #74.  Doing so also required adding a new
s3cache package built on that SDK.

THIS IS A BREAKING CHANGE for anyone who uses s3, since the syntax of
the command line flag is now different.  This is necessary because
aws-sdk-go always requires the region to be declared explicitly, which
wasn't always the case with the previous format.  The break is
unfortunate, but given that the old s3 package hasn't seen updates in
years and so many newer S3 regions only support the v4 authentication
method, it's unavoidable.
Will Norris 2017-09-12 04:11:41 +00:00
parent 5264eb6db0
commit 0ee5167444
3 changed files with 116 additions and 5 deletions
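
To make the breaking change concrete, here is a rough sketch of the new flag
value and the call it now feeds into. The region, bucket, and prefix are
hypothetical, and the snippet assumes the usual imports from within the repo
(the new package lives under internal/):

// Old flag form (no longer accepted): -cache s3://s3-us-west-2.amazonaws.com/my-bucket
// New flag form:                      -cache s3://us-west-2/my-bucket/imageproxy
c, err := s3cache.New("s3://us-west-2/my-bucket/imageproxy")
if err != nil {
	log.Fatalf("configuring s3 cache: %v", err)
}
_ = c // c provides the Get/Set/Delete methods used by imageproxy's cache layer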

README.md

@@ -98,10 +98,12 @@ enabled using the `-cache` flag. It supports the following values:
   available memory and is not recommended for production systems)
 - directory on local disk (e.g. `/tmp/imageproxy`) - will cache images
   on disk
-- s3 URL (e.g. `s3://s3-us-west-2.amazonaws.com/my-bucket`) - will cache
+- s3 URL (e.g. `s3://region/bucket-name/optional-path-prefix`) - will cache
   images on Amazon S3. This requires either an IAM role and instance profile
   with access to your your bucket or `AWS_ACCESS_KEY_ID` and `AWS_SECRET_KEY`
-  environmental variables be set.
+  environmental variables be set. (Additional methods of loading credentials
+  are documented in the [aws-sdk-go session
+  package](https://docs.aws.amazon.com/sdk-for-go/api/aws/session/)).
 - gcs URL (e.g. `gcs://bucket-name/optional-path-prefix`) - will cache images
   on Google Cloud Storage. This requires `GCP_PRIVATE_KEY` environmental
   variable be set.
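
A note on the credentials mentioned in the s3 bullet above: the new cache
builds a default aws-sdk-go session, so any credential source that session
package supports will work. The sketch below (illustration only, with a
hypothetical region; not code from this commit) shows how such a session
resolves credentials:

// session.NewSession with no explicit credentials walks the SDK's default
// chain: environment variables, the shared credentials file, then an
// EC2/ECS role. internal/s3cache does the same, with the region parsed
// from the cache URL.
sess, err := session.NewSession(&aws.Config{Region: aws.String("us-west-2")})
if err != nil {
	log.Fatal(err)
}
creds, err := sess.Config.Credentials.Get()
if err != nil {
	log.Fatal(err) // no provider in the chain produced credentials
}
log.Printf("credentials resolved by provider %q", creds.ProviderName)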

cmd/imageproxy/main.go

@@ -32,8 +32,8 @@ import (
 	"github.com/gregjones/httpcache/diskcache"
 	rediscache "github.com/gregjones/httpcache/redis"
 	"github.com/peterbourgon/diskv"
-	"sourcegraph.com/sourcegraph/s3cache"
 	"willnorris.com/go/imageproxy"
+	"willnorris.com/go/imageproxy/internal/s3cache"
 )
 
 var addr = flag.String("addr", "localhost:8080", "TCP address to listen on")
@@ -110,8 +110,7 @@ func parseCache() (imageproxy.Cache, error) {
 
 	switch u.Scheme {
 	case "s3":
-		u.Scheme = "https"
-		return s3cache.New(u.String()), nil
+		return s3cache.New(u.String())
 	case "gcs":
 		return gcscache.New(u.String()), nil
 	case "azure":

internal/s3cache/s3cache.go Normal file

@@ -0,0 +1,110 @@
// Package s3cache provides an httpcache.Cache implementation that stores
// cached values on Amazon S3.
package s3cache

import (
"bytes"
"crypto/md5"
"encoding/hex"
"io"
"io/ioutil"
"log"
"net/url"
"path"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
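
// cache stores httpcache-style cache entries as objects in a single S3
// bucket, under an optional key prefix.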
type cache struct {
*s3.S3
bucket, prefix string
}
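
// Get retrieves the cached value for key from S3, reporting whether it was
// found. AWS errors other than NoSuchKey are logged.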
func (c *cache) Get(key string) ([]byte, bool) {
key = path.Join(c.prefix, keyToFilename(key))
input := &s3.GetObjectInput{
Bucket: &c.bucket,
Key: &key,
}
resp, err := c.GetObject(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok && aerr.Code() != "NoSuchKey" {
log.Printf("error fetching from s3: %v", aerr)
}
return nil, false
}
value, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Printf("error reading s3 response body: %v", err)
return nil, false
}
return value, true
}
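
// Set writes value to S3 under the object key derived from key, logging any
// error.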
func (c *cache) Set(key string, value []byte) {
key = path.Join(c.prefix, keyToFilename(key))
input := &s3.PutObjectInput{
Body: aws.ReadSeekCloser(bytes.NewReader(value)),
Bucket: &c.bucket,
Key: &key,
}
_, err := c.PutObject(input)
if err != nil {
log.Printf("error writing to s3: %v", err)
}
}
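
// Delete removes the object for key from S3, logging any error.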
func (c *cache) Delete(key string) {
key = path.Join(c.prefix, keyToFilename(key))
input := &s3.DeleteObjectInput{
Bucket: &c.bucket,
Key: &key,
}
_, err := c.DeleteObject(input)
if err != nil {
log.Printf("error deleting from s3: %v", err)
}
}
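
// keyToFilename hashes key with MD5 so that arbitrary cache keys map to
// fixed-length, URL-safe object names.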
func keyToFilename(key string) string {
h := md5.New()
io.WriteString(h, key)
return hex.EncodeToString(h.Sum(nil))
}

// New constructs a cache configured using the provided URL string. URL should
// be of the form: "s3://region/bucket/optional-path-prefix". Credentials
// should be specified using one of the mechanisms supported by aws-sdk-go (see
// https://docs.aws.amazon.com/sdk-for-go/api/aws/session/).
func New(s string) (*cache, error) {
u, err := url.Parse(s)
if err != nil {
return nil, err
}
region := u.Host
path := strings.SplitN(strings.TrimPrefix(u.Path, "/"), "/", 2)
bucket := path[0]
var prefix string
if len(path) > 1 {
prefix = path[1]
}
sess, err := session.NewSession(&aws.Config{Region: &region})
if err != nil {
return nil, err
}
return &cache{
S3: s3.New(sess),
bucket: bucket,
prefix: prefix,
}, nil
}
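
As a usage sketch of the new package (the bucket, region, and cache key below
are hypothetical; within imageproxy the keys are supplied by its HTTP caching
layer):

c, err := s3cache.New("s3://us-east-1/my-bucket/imageproxy")
if err != nil {
	log.Fatal(err)
}
c.Set("http://example.com/photo.jpg#200x0", []byte("cached response bytes"))
if v, ok := c.Get("http://example.com/photo.jpg#200x0"); ok {
	log.Printf("got %d cached bytes", len(v))
}
c.Delete("http://example.com/photo.jpg#200x0")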