0
Fork 0
mirror of https://github.com/willnorris/imageproxy.git synced 2024-12-16 21:56:43 -05:00

vendor: add sourcegraph/s3cache and dependencies

Adds:
 - github.com/kr/http/transport
 - github.com/sqs/s3
 - github.com/sqs/s3/s3util
 - sourcegraph.com/sourcegraph/s3cache
This commit is contained in:
Will Norris 2015-12-07 20:04:55 -08:00
parent ec96fcbc90
commit 11370ac826
19 changed files with 1228 additions and 0 deletions

3
vendor/github.com/kr/http/transport/AUTHORS generated vendored Normal file
View file

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

3
vendor/github.com/kr/http/transport/CONTRIBUTORS generated vendored Normal file
View file

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

27
vendor/github.com/kr/http/transport/LICENSE generated vendored Normal file
View file

@ -0,0 +1,27 @@
Copyright (c) 2009 The transport Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

4
vendor/github.com/kr/http/transport/Readme generated vendored Normal file
View file

@ -0,0 +1,4 @@
package transport contains a generic wrapper for http.RoundTripper
It was shamelessly copied (slightly modified) from
https://github.com/golang/oauth2/blob/95a9f97e5/transport.go

132
vendor/github.com/kr/http/transport/transport.go generated vendored Normal file
View file

@ -0,0 +1,132 @@
// Copyright 2014 The transport Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package transport provides a general-purpose wrapper for
// http.RoundTripper. It implements the pattern of taking a
// request, modifying a copy, and passing the modified copy to an
// underlying RoundTripper, including bookkeeping necessary to
// cancel in-flight requests.
package transport
import (
"errors"
"io"
"net/http"
"sync"
)
// Wrapper is an http.RoundTripper that makes HTTP requests,
// wrapping a base RoundTripper and altering every outgoing
// request in some way.
type Wrapper struct {
	// Modify alters the request as needed.
	Modify func(*http.Request) error

	// Base is the base RoundTripper used to make HTTP requests.
	// If nil, http.DefaultTransport is used.
	Base http.RoundTripper

	mu     sync.Mutex                      // guards modReq
	modReq map[*http.Request]*http.Request // original -> modified
}

// RoundTrip provides a copy of req
// to the underlying RoundTripper,
// altered in some way by Modify.
//
// The original->modified mapping is recorded so CancelRequest can
// cancel the request that was actually sent; the entry is removed
// on error or once the response body reaches EOF.
func (t *Wrapper) RoundTrip(req *http.Request) (*http.Response, error) {
	if t.Modify == nil {
		return nil, errors.New("transport: Wrapper's Modify is nil")
	}
	req2 := cloneRequest(req) // per RoundTripper contract
	err := t.Modify(req2)
	if err != nil {
		return nil, err
	}
	t.setModReq(req, req2)
	res, err := t.base().RoundTrip(req2)
	if err != nil {
		t.setModReq(req, nil)
		return nil, err
	}
	// Keep the bookkeeping entry alive until the caller finishes
	// reading the body, so a late CancelRequest still works.
	res.Body = &onEOFReader{
		rc: res.Body,
		fn: func() { t.setModReq(req, nil) },
	}
	return res, nil
}

// CancelRequest cancels an in-flight request by closing its connection.
// It looks up the modified request that was actually sent and forwards
// cancellation to the base transport, if that transport supports it.
func (t *Wrapper) CancelRequest(req *http.Request) {
	type canceler interface {
		CancelRequest(*http.Request)
	}
	if cr, ok := t.base().(canceler); ok {
		t.mu.Lock()
		modReq := t.modReq[req]
		delete(t.modReq, req)
		t.mu.Unlock()
		cr.CancelRequest(modReq)
	}
}
// base returns the RoundTripper used for outgoing requests, falling
// back to http.DefaultTransport when none is configured.
func (t *Wrapper) base() http.RoundTripper {
	if t.Base == nil {
		return http.DefaultTransport
	}
	return t.Base
}
// setModReq records (or, when mod is nil, forgets) the modified
// request derived from orig, lazily allocating the tracking map.
func (t *Wrapper) setModReq(orig, mod *http.Request) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if mod == nil {
		delete(t.modReq, orig) // no-op on a nil map
		return
	}
	if t.modReq == nil {
		t.modReq = make(map[*http.Request]*http.Request)
	}
	t.modReq[orig] = mod
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header, len(r.Header))
for k, s := range r.Header {
r2.Header[k] = append([]string(nil), s...)
}
return r2
}
type onEOFReader struct {
rc io.ReadCloser
fn func()
}
func (r *onEOFReader) Read(p []byte) (n int, err error) {
n, err = r.rc.Read(p)
if err == io.EOF {
r.runFunc()
}
return
}
func (r *onEOFReader) Close() error {
err := r.rc.Close()
r.runFunc()
return err
}
func (r *onEOFReader) runFunc() {
if fn := r.fn; fn != nil {
fn()
r.fn = nil
}
}

19
vendor/github.com/sqs/s3/License generated vendored Normal file
View file

@ -0,0 +1,19 @@
Copyright (c) 2012 Keith Rarick.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

4
vendor/github.com/sqs/s3/Readme generated vendored Normal file
View file

@ -0,0 +1,4 @@
Package s3 signs HTTP requests for use with Amazon's S3 API.
Documentation:
http://godoc.org/github.com/kr/s3

28
vendor/github.com/sqs/s3/client.go generated vendored Normal file
View file

@ -0,0 +1,28 @@
package s3
import (
"net/http"
"time"
"github.com/kr/http/transport"
)
// Client returns an HTTP client that signs all outgoing requests.
// The returned Transport also provides CancelRequest.
// Client is equivalent to DefaultService.Client.
func Client(k Keys) *http.Client {
	return DefaultService.Client(k)
}

// Client returns an HTTP client that signs all outgoing requests
// with keys k for use against service s.
// The returned Transport also provides CancelRequest.
func (s *Service) Client(k Keys) *http.Client {
	tr := &transport.Wrapper{Modify: func(r *http.Request) error {
		// A Date header is required for signing; supply one if the
		// caller hasn't set it already.
		if r.Header.Get("Date") == "" {
			r.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
		}
		s.Sign(r, k)
		return nil
	}}
	return &http.Client{Transport: tr}
}

4
vendor/github.com/sqs/s3/s3util/Readme generated vendored Normal file
View file

@ -0,0 +1,4 @@
Package s3util provides streaming transfers to and from Amazon S3.
Full documentation:
http://godoc.org/github.com/kr/s3/s3util

28
vendor/github.com/sqs/s3/s3util/config.go generated vendored Normal file
View file

@ -0,0 +1,28 @@
// Package s3util provides streaming transfers to and from Amazon S3.
//
// To use it, open or create an S3 object, read or write data,
// and close the object.
//
// You must assign valid credentials to DefaultConfig.Keys before using
// DefaultConfig. Be sure to close an io.WriteCloser returned by this package,
// to flush buffers and complete the multipart upload process.
package s3util
// TODO(kr): parse error responses; return structured data
import (
"net/http"
"github.com/sqs/s3"
)
// DefaultConfig is the Config used by package-level functions when
// the caller passes a nil *Config. Its Keys must be populated with
// valid credentials before use.
var DefaultConfig = &Config{
	Service: s3.DefaultService,
	Keys:    new(s3.Keys),
}

// Config bundles what is needed to talk to an S3-compatible service:
// the service endpoint description, the signing credentials, and the
// HTTP client used for requests.
type Config struct {
	*s3.Service
	*s3.Keys
	*http.Client // if nil, uses http.DefaultClient
}

32
vendor/github.com/sqs/s3/s3util/delete.go generated vendored Normal file
View file

@ -0,0 +1,32 @@
package s3util
import (
"io"
"net/http"
"time"
)
// Delete deletes the S3 object at url. An HTTP status other than 204 (No
// Content) is considered an error.
//
// If c is nil, Delete uses DefaultConfig.
//
// On success the caller receives the response body and is responsible
// for closing it.
func Delete(url string, c *Config) (io.ReadCloser, error) {
	if c == nil {
		c = DefaultConfig
	}
	r, err := http.NewRequest("DELETE", url, nil)
	if err != nil {
		// Previously this error was silently dropped, turning a
		// malformed URL into a nil-pointer dereference below.
		return nil, err
	}
	r.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
	c.Sign(r, *c.Keys)
	client := c.Client
	if client == nil {
		client = http.DefaultClient
	}
	resp, err := client.Do(r)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusNoContent {
		return nil, newRespError(resp)
	}
	return resp.Body, nil
}

29
vendor/github.com/sqs/s3/s3util/error.go generated vendored Normal file
View file

@ -0,0 +1,29 @@
package s3util
import (
"bytes"
"fmt"
"io"
"net/http"
)
// respError captures an unexpected HTTP response so it can be
// reported as an error: the response metadata plus a buffered copy
// of its body.
type respError struct {
	r *http.Response
	b bytes.Buffer
}

// newRespError drains and closes r's body into the error so the
// underlying connection can be reused. A failure while copying the
// body is deliberately ignored (best effort: the status code alone
// is still useful).
func newRespError(r *http.Response) *respError {
	e := new(respError)
	e.r = r
	io.Copy(&e.b, r.Body)
	r.Body.Close()
	return e
}

// Error implements the error interface, reporting the HTTP status
// and the quoted body text.
func (e *respError) Error() string {
	return fmt.Sprintf(
		"unwanted http status %d: %q",
		e.r.StatusCode,
		e.b.String(),
	)
}

33
vendor/github.com/sqs/s3/s3util/open.go generated vendored Normal file
View file

@ -0,0 +1,33 @@
package s3util
import (
"io"
"net/http"
"time"
)
// Open requests the S3 object at url. An HTTP status other than 200
// (OK) or 206 (Partial Content) is considered an error.
//
// If c is nil, Open uses DefaultConfig.
//
// The caller is responsible for closing the returned body.
func Open(url string, c *Config) (io.ReadCloser, error) {
	if c == nil {
		c = DefaultConfig
	}
	// TODO(kr): maybe parallel range fetching
	r, err := http.NewRequest("GET", url, nil)
	if err != nil {
		// Previously this error was silently dropped, turning a
		// malformed URL into a nil-pointer dereference below.
		return nil, err
	}
	r.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
	c.Sign(r, *c.Keys)
	client := c.Client
	if client == nil {
		client = http.DefaultClient
	}
	resp, err := client.Do(r)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
		return nil, newRespError(resp)
	}
	return resp.Body, nil
}

218
vendor/github.com/sqs/s3/s3util/readdir.go generated vendored Normal file
View file

@ -0,0 +1,218 @@
package s3util
import (
"bytes"
"encoding/xml"
"errors"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
)
// File represents an S3 object or directory.
type File struct {
	url    string             // bucket URL (path stripped in NewFile)
	prefix string             // key prefix selecting this directory; "" or ends in "/"
	config *Config            // nil means use DefaultConfig
	result *listObjectsResult // most recent page; nil before the first Readdir
}

// fileInfo implements os.FileInfo for S3 listing entries.
type fileInfo struct {
	name    string
	size    int64
	dir     bool
	modTime time.Time // lazily parsed from sys.LastModified
	sys     *Stat     // nil for synthesized directory entries
}

// Stat contains information about an S3 object or directory.
// It is the "underlying data source" returned by method Sys
// for each FileInfo produced by this package.
//
//	fi.Sys().(*s3util.Stat)
//
// For the meaning of these fields, see
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html.
type Stat struct {
	Key          string
	LastModified string
	ETag         string // ETag value, without double quotes.
	Size         string
	StorageClass string
	OwnerID      string `xml:"Owner>ID"`
	OwnerName    string `xml:"Owner>DisplayName"`
}

// listObjectsResult mirrors the relevant parts of the ListBucketResult
// XML document returned by a GET-bucket request.
type listObjectsResult struct {
	IsTruncated bool
	Contents    []Stat
	Directories []string `xml:"CommonPrefixes>Prefix"` // Suffix "/" trimmed
}
// Name returns the entry's name.
func (f *fileInfo) Name() string { return f.name }

// Size returns the object size in bytes (0 for directories).
func (f *fileInfo) Size() int64 { return f.size }

// Mode reports 0755|ModeDir for directories and 0644 otherwise.
func (f *fileInfo) Mode() os.FileMode {
	mode := os.FileMode(0644)
	if f.dir {
		mode = 0755 | os.ModeDir
	}
	return mode
}

// ModTime lazily parses the LastModified timestamp from the
// underlying Stat; a parse failure yields the zero time.
func (f *fileInfo) ModTime() time.Time {
	if !f.modTime.IsZero() || f.sys == nil {
		return f.modTime
	}
	// we return the zero value if a parse error ever happens.
	f.modTime, _ = time.Parse(time.RFC3339Nano, f.sys.LastModified)
	return f.modTime
}

// IsDir reports whether the entry is a directory.
func (f *fileInfo) IsDir() bool { return f.dir }

// Sys returns the underlying *Stat (nil for synthesized directories).
func (f *fileInfo) Sys() interface{} { return f.sys }
// NewFile returns a new File with the given URL and config.
//
// Set rawurl to a directory on S3, such as
// https://mybucket.s3.amazonaws.com/myfolder.
// The URL cannot have query parameters or a fragment.
// If c is nil, DefaultConfig will be used.
func NewFile(rawurl string, c *Config) (*File, error) {
	u, err := url.Parse(rawurl)
	if err != nil {
		return nil, err
	}
	// Go convention: error strings are lowercase without trailing
	// punctuation (fixed from the original capitalized/period form).
	if u.RawQuery != "" {
		return nil, errors.New("url cannot have raw query parameters")
	}
	if u.Fragment != "" {
		return nil, errors.New("url cannot have a fragment")
	}
	// Normalize the path into a key prefix: no leading slash, and a
	// trailing slash so only direct children match in listings.
	prefix := strings.TrimLeft(u.Path, "/")
	if prefix != "" && !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}
	u.Path = ""
	return &File{u.String(), prefix, c, nil}, nil
}
// Readdir requests a list of entries in the S3 directory
// represented by f and returns a slice of up to n FileInfo
// values, in alphabetical order. Subsequent calls
// on the same File will yield further FileInfos.
// Only direct children are returned, not deeper descendants.
//
// Once the final (non-truncated) page has been consumed, Readdir
// returns an empty slice and io.EOF, matching the os.File.Readdir
// convention.
func (f *File) Readdir(n int) ([]os.FileInfo, error) {
	if f.result != nil && !f.result.IsTruncated {
		return make([]os.FileInfo, 0), io.EOF
	}
	reader, err := f.sendRequest(n)
	if err != nil {
		return nil, err
	}
	defer reader.Close()
	return f.parseResponse(reader)
}
// sendRequest issues one signed ListObjects (GET-bucket) request for
// up to count keys and returns the raw XML response body.
//
// delimiter=/ restricts results to direct children. When the previous
// page was truncated, a marker (the lexicographically greater of the
// last key and last common prefix seen) resumes the listing.
func (f *File) sendRequest(count int) (io.ReadCloser, error) {
	c := f.config
	if c == nil {
		c = DefaultConfig
	}
	var buf bytes.Buffer
	buf.WriteString(f.url)
	buf.WriteString("?delimiter=%2F")
	if f.prefix != "" {
		buf.WriteString("&prefix=")
		buf.WriteString(url.QueryEscape(f.prefix))
	}
	if count > 0 {
		buf.WriteString("&max-keys=")
		buf.WriteString(strconv.Itoa(count))
	}
	if f.result != nil && f.result.IsTruncated {
		var lastDir, lastKey, marker string
		if len(f.result.Directories) > 0 {
			lastDir = f.result.Directories[len(f.result.Directories)-1]
		}
		if len(f.result.Contents) > 0 {
			lastKey = f.result.Contents[len(f.result.Contents)-1].Key
		}
		// Keys and prefixes arrive in lexicographic order, so the
		// greater of the two is the high-water mark for the next page.
		if lastKey > lastDir {
			marker = lastKey
		} else {
			marker = lastDir
		}
		if marker != "" {
			buf.WriteString("&marker=")
			buf.WriteString(url.QueryEscape(marker))
		}
	}
	u := buf.String()
	r, _ := http.NewRequest("GET", u, nil)
	r.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
	c.Sign(r, *c.Keys)
	client := c.Client
	if client == nil {
		client = http.DefaultClient
	}
	resp, err := client.Do(r)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != 200 {
		return nil, newRespError(resp)
	}
	return resp.Body, nil
}
// parseResponse decodes one ListObjects XML page into FileInfos:
// first the objects ("Contents"), then the common prefixes, which are
// synthesized as directories. The decoded page is saved on f so the
// next Readdir can resume from the correct marker.
func (f *File) parseResponse(reader io.Reader) ([]os.FileInfo, error) {
	// Reading it all in now makes the XML decoding way faster.
	bb, err := ioutil.ReadAll(reader)
	if err != nil {
		return nil, err
	}
	decoder := xml.NewDecoder(bytes.NewReader(bb))
	result := listObjectsResult{}
	if err := decoder.Decode(&result); err != nil {
		return nil, err
	}
	infos := make([]os.FileInfo, len(result.Contents)+len(result.Directories))
	for i, content := range result.Contents {
		c := content // copy so &c below doesn't alias the loop variable
		c.ETag = strings.Trim(c.ETag, `"`)
		size, _ := strconv.ParseInt(c.Size, 10, 0)
		name := c.Key
		isDir := false // idiomatic MixedCaps (was is_dir)
		if size == 0 && strings.HasSuffix(c.Key, "/") {
			// Zero-length keys ending in "/" are directory
			// placeholder objects; present them as directories.
			name = strings.TrimRight(c.Key, "/")
			isDir = true
		}
		infos[i] = &fileInfo{
			name: name,
			size: size,
			dir:  isDir,
			sys:  &c,
		}
	}
	for i, dir := range result.Directories {
		infos[len(result.Contents)+i] = &fileInfo{
			name: strings.TrimRight(dir, "/"),
			size: 0,
			dir:  true,
		}
	}
	f.result = &result
	return infos, nil
}

291
vendor/github.com/sqs/s3/s3util/uploader.go generated vendored Normal file
View file

@ -0,0 +1,291 @@
package s3util
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"sync"
"syscall"
"time"
"github.com/sqs/s3"
)
// defined by amazon
const (
	minPartSize = 5 * 1024 * 1024 // smallest allowed multipart part
	maxPartSize = 1<<31 - 1       // for 32-bit use; amz max is 5GiB
	maxObjSize  = 5 * 1024 * 1024 * 1024 * 1024
	maxNPart    = 10000 // max parts per multipart upload
)

const (
	concurrency = 5 // simultaneous part uploads
	nTry        = 2 // attempts per part before giving up
)

// part is one buffered chunk of the upload. The exported fields are
// marshaled into the CompleteMultipartUpload XML document.
type part struct {
	r   io.ReadSeeker // part contents; seekable so retries can rewind
	len int64

	// read by xml encoder
	PartNumber int
	ETag       string
}

// uploader streams data to S3 as a multipart upload: Write buffers
// data into parts, a pool of worker goroutines PUTs them, and Close
// completes (or aborts) the upload.
type uploader struct {
	s3       s3.Service
	keys     s3.Keys
	url      string
	client   *http.Client
	UploadId string // written by xml decoder

	bufsz  int64          // current part size; grows toward maxPartSize
	buf    []byte         // accumulating buffer for the current part
	off    int            // bytes written into buf so far
	ch     chan *part     // work queue consumed by the workers
	part   int            // number of parts flushed so far
	closed bool
	err    error          // first worker error; checked by Write and Close
	wg     sync.WaitGroup // tracks in-flight parts

	xml struct {
		XMLName string `xml:"CompleteMultipartUpload"`
		Part    []*part
	}
}
// Create creates an S3 object at url and sends multipart upload requests as
// data is written.
//
// If h is not nil, each of its entries is added to the HTTP request header.
// If c is nil, Create uses DefaultConfig.
func Create(url string, h http.Header, c *Config) (io.WriteCloser, error) {
	if c == nil {
		c = DefaultConfig
	}
	return newUploader(url, h, c)
}

// Sends an S3 multipart upload initiation request.
// See http://docs.amazonwebservices.com/AmazonS3/latest/dev/mpuoverview.html.
// This initial request returns an UploadId that we use to identify
// subsequent PUT requests.
func newUploader(url string, h http.Header, c *Config) (u *uploader, err error) {
	u = new(uploader)
	u.s3 = *c.Service
	u.url = url
	u.keys = *c.Keys
	u.client = c.Client
	if u.client == nil {
		u.client = http.DefaultClient
	}
	u.bufsz = minPartSize
	r, err := http.NewRequest("POST", url+"?uploads", nil)
	if err != nil {
		return nil, err
	}
	r.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
	// Copy the caller-supplied headers (e.g. Content-Type, ACLs)
	// onto the initiation request.
	for k := range h {
		for _, v := range h[k] {
			r.Header.Add(k, v)
		}
	}
	u.s3.Sign(r, u.keys)
	resp, err := u.client.Do(r)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, newRespError(resp)
	}
	// The response XML carries the UploadId; decode it straight into u.
	err = xml.NewDecoder(resp.Body).Decode(u)
	if err != nil {
		return nil, err
	}
	u.ch = make(chan *part)
	for i := 0; i < concurrency; i++ {
		go u.worker()
	}
	return u, nil
}
// Write buffers p into parts of u.bufsz bytes, handing each full part
// to the worker pool. It reports syscall.EINVAL after Close and
// surfaces the first upload error recorded by the workers.
func (u *uploader) Write(p []byte) (n int, err error) {
	if u.closed {
		return 0, syscall.EINVAL
	}
	if u.err != nil {
		return 0, u.err
	}
	for n < len(p) {
		if cap(u.buf) == 0 {
			u.buf = make([]byte, int(u.bufsz))
			// Increase part size (1.001x).
			// This lets us reach the max object size (5TiB) while
			// still doing minimal buffering for small objects.
			u.bufsz = min(u.bufsz+u.bufsz/1000, maxPartSize)
		}
		r := copy(u.buf[u.off:], p[n:])
		u.off += r
		n += r
		if u.off == len(u.buf) {
			u.flush()
		}
	}
	return n, nil
}

// flush queues the current buffer as the next part and resets the
// buffer. It blocks until a worker picks the part up (u.ch is
// unbuffered).
func (u *uploader) flush() {
	u.wg.Add(1)
	u.part++
	p := &part{bytes.NewReader(u.buf[:u.off]), int64(u.off), u.part, ""}
	u.xml.Part = append(u.xml.Part, p)
	u.ch <- p
	u.buf, u.off = nil, 0
}

// worker uploads queued parts until the channel is closed by Close.
func (u *uploader) worker() {
	for p := range u.ch {
		u.retryUploadPart(p)
	}
}
// Calls putPart up to nTry times to recover from transient errors.
// On persistent failure the error is stored in u.err for Write and
// Close to observe.
//
// NOTE(review): u.err is written here from worker goroutines and read
// by Write/Close without synchronization — looks like a data race;
// confirm under -race before relying on it.
func (u *uploader) retryUploadPart(p *part) {
	defer u.wg.Done()
	defer func() { p.r = nil }() // free the large buffer
	var err error
	for i := 0; i < nTry; i++ {
		p.r.Seek(0, 0) // rewind so a retry re-sends the full part
		err = u.putPart(p)
		if err == nil {
			return
		}
	}
	u.err = err
}

// Uploads part p, reading its contents from p.r.
// Stores the ETag in p.ETag.
func (u *uploader) putPart(p *part) error {
	v := url.Values{}
	v.Set("partNumber", strconv.Itoa(p.PartNumber))
	v.Set("uploadId", u.UploadId)
	req, err := http.NewRequest("PUT", u.url+"?"+v.Encode(), p.r)
	if err != nil {
		return err
	}
	req.ContentLength = p.len
	req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
	u.s3.Sign(req, u.keys)
	resp, err := u.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return newRespError(resp)
	}
	s := resp.Header.Get("etag") // includes quote chars for some reason
	if len(s) < 2 {
		return fmt.Errorf("received invalid etag %q", s)
	}
	p.ETag = s[1 : len(s)-1] // strip the surrounding quotes
	return nil
}
// Close flushes any buffered data, waits for all in-flight parts,
// and completes the multipart upload. If a part failed, the upload
// is aborted and the error returned. If no data was ever written,
// the multipart upload is aborted and an empty object is created
// with a plain PUT instead (a multipart completion needs at least
// one part). Returns syscall.EINVAL if already closed.
func (u *uploader) Close() error {
	if u.closed {
		return syscall.EINVAL
	}
	if cap(u.buf) > 0 {
		u.flush()
	}
	// Wait for outstanding parts before closing the work channel,
	// which terminates the worker goroutines.
	u.wg.Wait()
	close(u.ch)
	u.closed = true
	if u.err != nil {
		u.abort()
		return u.err
	}
	if u.part == 0 {
		// Can't upload an empty file with multipart uploads.
		u.abort()
		if u.err != nil {
			return u.err
		}
		req, err := http.NewRequest("PUT", u.url, bytes.NewReader(nil))
		if err != nil {
			return err
		}
		req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
		u.s3.Sign(req, u.keys)
		resp, err := u.client.Do(req)
		if err != nil {
			return err
		}
		if resp.StatusCode != 200 {
			return newRespError(resp)
		}
		resp.Body.Close()
		return nil
	}
	// Complete the upload by POSTing the collected part list
	// (part numbers + ETags) as CompleteMultipartUpload XML.
	body, err := xml.Marshal(u.xml)
	if err != nil {
		return err
	}
	b := bytes.NewBuffer(body)
	v := url.Values{}
	v.Set("uploadId", u.UploadId)
	req, err := http.NewRequest("POST", u.url+"?"+v.Encode(), b)
	if err != nil {
		return err
	}
	req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
	u.s3.Sign(req, u.keys)
	resp, err := u.client.Do(req)
	if err != nil {
		return err
	}
	if resp.StatusCode != 200 {
		return newRespError(resp)
	}
	resp.Body.Close()
	return nil
}

// abort cancels the multipart upload on a best-effort basis; all
// failures are silently ignored.
func (u *uploader) abort() {
	// TODO(kr): devise a reasonable way to report an error here in addition
	// to the error that caused the abort.
	v := url.Values{}
	v.Set("uploadId", u.UploadId)
	s := u.url + "?" + v.Encode()
	req, err := http.NewRequest("DELETE", s, nil)
	if err != nil {
		return
	}
	req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
	u.s3.Sign(req, u.keys)
	resp, err := u.client.Do(req)
	if err != nil {
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return
	}
}
// min returns the smaller of a and b.
func min(a, b int64) int64 {
	if b < a {
		return b
	}
	return a
}

200
vendor/github.com/sqs/s3/sign.go generated vendored Normal file
View file

@ -0,0 +1,200 @@
// Package s3 signs HTTP requests for Amazon S3 and compatible services.
package s3
// See
// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/RESTAuthentication.html.
import (
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"io"
"net/http"
"sort"
"strings"
)
// signParams is the set of query-string parameters (sub-resources)
// that must be included in the canonicalized resource portion of the
// string to sign; see the S3 REST authentication documentation.
var signParams = map[string]bool{
	"acl":                          true,
	"delete":                       true,
	"lifecycle":                    true,
	"location":                     true,
	"logging":                      true,
	"notification":                 true,
	"partNumber":                   true,
	"policy":                       true,
	"requestPayment":               true,
	"response-cache-control":       true,
	"response-content-disposition": true,
	"response-content-encoding":    true,
	"response-content-language":    true,
	"response-content-type":        true,
	"response-expires":             true,
	"restore":                      true,
	"torrent":                      true,
	"uploadId":                     true,
	"uploads":                      true,
	"versionId":                    true,
	"versioning":                   true,
	"versions":                     true,
	"website":                      true,
}

// Keys holds a set of Amazon Security Credentials.
type Keys struct {
	AccessKey string
	SecretKey string

	// SecurityToken is used for temporary security credentials.
	// If set, it will be added to header field X-Amz-Security-Token
	// before signing a request.
	SecurityToken string
	// See http://docs.aws.amazon.com/AmazonS3/latest/dev/MakingRequests.html#TypesofSecurityCredentials
}
// IdentityBucket returns subdomain unchanged.
// It is designed to be used with S3-compatible services that
// treat the entire subdomain as the bucket name, for example
// storage.io.
func IdentityBucket(subdomain string) string { return subdomain }
// AmazonBucket returns everything up to the last '.' in subdomain,
// or "" when there is no '.'. It is designed to be used with the
// Amazon service.
//
//	"johnsmith.s3"           becomes "johnsmith"
//	"johnsmith.s3-eu-west-1" becomes "johnsmith"
//	"www.example.com.s3"     becomes "www.example.com"
func AmazonBucket(subdomain string) string {
	i := strings.LastIndex(subdomain, ".")
	if i < 0 {
		return ""
	}
	return subdomain[:i]
}
// DefaultService is the default Service used by Sign.
var DefaultService = &Service{Domain: "amazonaws.com"}

// Sign signs an HTTP request with the given S3 keys.
//
// This function is a wrapper around DefaultService.Sign.
func Sign(r *http.Request, k Keys) {
	DefaultService.Sign(r, k)
}

// Service represents an S3-compatible service.
type Service struct {
	// Domain is the service's root domain. It is used to extract
	// the subdomain from an http.Request before passing the
	// subdomain to Bucket.
	Domain string

	// Bucket derives the bucket name from a subdomain.
	// If nil, AmazonBucket is used.
	Bucket func(subdomain string) string
}

// Sign signs an HTTP request with the given S3 keys for use on service s.
// It computes an HMAC-SHA1 over the canonical request text and sets the
// Authorization header to "AWS <AccessKey>:<base64 signature>".
func (s *Service) Sign(r *http.Request, k Keys) {
	if k.SecurityToken != "" {
		r.Header.Set("X-Amz-Security-Token", k.SecurityToken)
	}
	h := hmac.New(sha1.New, []byte(k.SecretKey))
	s.writeSigData(h, r)
	sig := make([]byte, base64.StdEncoding.EncodedLen(h.Size()))
	base64.StdEncoding.Encode(sig, h.Sum(nil))
	r.Header.Set("Authorization", "AWS "+k.AccessKey+":"+string(sig))
}
// writeSigData writes the canonical string to sign to w: the method,
// content-md5, content-type and date lines, then the canonicalized
// x-amz- headers and the canonicalized resource.
func (s *Service) writeSigData(w io.Writer, r *http.Request) {
	w.Write([]byte(r.Method))
	w.Write([]byte{'\n'})
	w.Write([]byte(r.Header.Get("content-md5")))
	w.Write([]byte{'\n'})
	w.Write([]byte(r.Header.Get("content-type")))
	w.Write([]byte{'\n'})
	if _, ok := r.Header["X-Amz-Date"]; !ok {
		// When X-Amz-Date is present, the Date line must be left
		// empty, per the S3 signing rules.
		w.Write([]byte(r.Header.Get("date")))
	}
	w.Write([]byte{'\n'})
	writeAmzHeaders(w, r)
	s.writeResource(w, r)
}

// writeResource writes the canonicalized resource: the virtual-host
// bucket (if any), the URL path stripped of its query string, and
// the signed sub-resources.
func (s *Service) writeResource(w io.Writer, r *http.Request) {
	s.writeVhostBucket(w, strings.ToLower(r.Host))
	path := r.URL.RequestURI()
	if r.URL.RawQuery != "" {
		// Strip "?query"; signed sub-resources are appended
		// separately by writeSubResource.
		path = path[:len(path)-len(r.URL.RawQuery)-1]
	}
	w.Write([]byte(path))
	s.writeSubResource(w, r)
}

// writeVhostBucket writes "/<bucket>" derived from the request host:
// nothing when the host is the bare service domain, the Bucket
// func's result for "<sub>.<domain>" hosts, or the whole host for
// CNAME-style buckets.
func (s *Service) writeVhostBucket(w io.Writer, host string) {
	if i := strings.Index(host, ":"); i != -1 {
		// strip any port number before matching against Domain
		host = host[:i]
	}
	if host == s.Domain {
		// no vhost - do nothing
	} else if strings.HasSuffix(host, "."+s.Domain) {
		// vhost - bucket may be in prefix
		b := s.Bucket
		if b == nil {
			b = AmazonBucket
		}
		bucket := b(host[:len(host)-len(s.Domain)-1])
		if bucket != "" {
			w.Write([]byte{'/'})
			w.Write([]byte(bucket))
		}
	} else {
		// cname - bucket is host
		w.Write([]byte{'/'})
		w.Write([]byte(host))
	}
}
// writeSubResource appends the canonicalized sub-resource portion of
// the string to sign: every query parameter listed in signParams,
// sorted, prefixed with '?' then joined with '&'. Parameters with
// empty values are written as bare keys.
func (s *Service) writeSubResource(w io.Writer, r *http.Request) {
	var a []string
	for k, vs := range r.URL.Query() {
		if !signParams[k] {
			continue
		}
		for _, v := range vs {
			if v == "" {
				a = append(a, k)
			} else {
				a = append(a, k+"="+v)
			}
		}
	}
	sort.Strings(a)
	sep := byte('?')
	// Loop variable renamed from "s", which shadowed the receiver.
	for _, param := range a {
		w.Write([]byte{sep})
		w.Write([]byte(param))
		sep = '&'
	}
}
func writeAmzHeaders(w io.Writer, r *http.Request) {
var keys []string
for k, _ := range r.Header {
if strings.HasPrefix(strings.ToLower(k), "x-amz-") {
keys = append(keys, k)
}
}
sort.Strings(keys)
var a []string
for _, k := range keys {
v := r.Header[k]
a = append(a, strings.ToLower(k)+":"+strings.Join(v, ","))
}
for _, h := range a {
w.Write([]byte(h))
w.Write([]byte{'\n'})
}
}

26
vendor/sourcegraph.com/sourcegraph/s3cache/LICENSE generated vendored Normal file
View file

@ -0,0 +1,26 @@
Copyright (c) 2013 The s3cache AUTHORS. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Sourcegraph Inc. nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

127
vendor/sourcegraph.com/sourcegraph/s3cache/s3cache.go generated vendored Normal file
View file

@ -0,0 +1,127 @@
// Package s3cache provides an implementation of httpcache.Cache that stores and
// retrieves data using Amazon S3.
package s3cache // import "sourcegraph.com/sourcegraph/s3cache"
import (
"compress/gzip"
"crypto/md5"
"encoding/hex"
"io"
"io/ioutil"
"log"
"os"
"strconv"
"strings"
"github.com/sqs/s3"
"github.com/sqs/s3/s3util"
)
// Cache objects store and retrieve data using Amazon S3.
type Cache struct {
	// Config is the Amazon S3 configuration.
	Config s3util.Config

	// BucketURL is the URL to the bucket on Amazon S3, which includes the
	// bucket name and the AWS region. Example:
	// "https://s3-us-west-2.amazonaws.com/mybucket".
	BucketURL string

	// Gzip indicates whether cache entries should be gzipped in Set and
	// gunzipped in Get. If true, cache entry keys will have the suffix ".gz"
	// appended.
	Gzip bool
}

// noLogErrors suppresses error logging when the environment variable
// NO_LOG_S3CACHE_ERRORS parses as true (read once at init).
var noLogErrors, _ = strconv.ParseBool(os.Getenv("NO_LOG_S3CACHE_ERRORS"))
// Get fetches the cache entry for key from S3, gunzipping it when
// c.Gzip is set. Any failure (missing object, bad gzip stream, read
// error) is reported as a cache miss (ok == false), never as an error.
func (c *Cache) Get(key string) (resp []byte, ok bool) {
	rdr, err := s3util.Open(c.url(key), &c.Config)
	if err != nil {
		return []byte{}, false
	}
	// Each defer binds the reader value at the defer statement, so
	// both the raw body and (below) the gzip reader get closed.
	defer rdr.Close()
	if c.Gzip {
		rdr, err = gzip.NewReader(rdr)
		if err != nil {
			return nil, false
		}
		defer rdr.Close()
	}
	resp, err = ioutil.ReadAll(rdr)
	if err != nil {
		if !noLogErrors {
			log.Printf("s3cache.Get failed: %s", err)
		}
	}
	return resp, err == nil
}

// Set stores resp under key, gzipping when configured. Errors are
// logged (unless suppressed) and otherwise swallowed — the cache is
// best effort by design.
func (c *Cache) Set(key string, resp []byte) {
	w, err := s3util.Create(c.url(key), nil, &c.Config)
	if err != nil {
		if !noLogErrors {
			log.Printf("s3util.Create failed: %s", err)
		}
		return
	}
	// Deferred closes run LIFO: the gzip writer (registered second)
	// flushes before the underlying upload writer is closed.
	defer w.Close()
	if c.Gzip {
		w = gzip.NewWriter(w)
		defer w.Close()
	}
	_, err = w.Write(resp)
	if err != nil {
		if !noLogErrors {
			log.Printf("s3cache.Set failed: %s", err)
		}
	}
}

// Delete removes the entry for key; failures are logged and ignored.
func (c *Cache) Delete(key string) {
	rdr, err := s3util.Delete(c.url(key), &c.Config)
	if err != nil {
		if !noLogErrors {
			log.Printf("s3cache.Delete failed: %s", err)
		}
		return
	}
	defer rdr.Close()
}
// url maps a cache key to the full S3 object URL: the hashed object
// key (plus ".gz" when compression is on) appended to BucketURL.
func (c *Cache) url(key string) string {
	key = cacheKeyToObjectKey(key)
	if c.Gzip {
		key += ".gz"
	}
	sep := "/"
	if strings.HasSuffix(c.BucketURL, "/") {
		sep = ""
	}
	return c.BucketURL + sep + key
}
// cacheKeyToObjectKey maps an arbitrary cache key to a fixed-length,
// URL-safe S3 object key: the hex-encoded MD5 of the key.
func cacheKeyToObjectKey(key string) string {
	sum := md5.Sum([]byte(key))
	return hex.EncodeToString(sum[:])
}
// New returns a new Cache with underlying storage in Amazon S3. The bucketURL
// is the full URL to the bucket on Amazon S3, including the bucket name and AWS
// region (e.g., "https://s3-us-west-2.amazonaws.com/mybucket").
//
// The environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_KEY are used as the AWS
// credentials. To use different credentials, modify the returned Cache object
// or construct a Cache object manually.
func New(bucketURL string) *Cache {
	return &Cache{
		Config: s3util.Config{
			Keys: &s3.Keys{
				// Credentials are read from the environment at
				// construction time, not lazily per request.
				AccessKey: os.Getenv("AWS_ACCESS_KEY_ID"),
				SecretKey: os.Getenv("AWS_SECRET_KEY"),
			},
			Service: s3.DefaultService,
		},
		BucketURL: bucketURL,
	}
}

20
vendor/vendor.json vendored
View file

@ -22,6 +22,11 @@
"revision": "ae1d6feaf2d3354cece07d7dcf420de6745ad7b6",
"revisionTime": "2015-10-25T15:48:47Z"
},
{
"path": "github.com/kr/http/transport",
"revision": "77bd98b60462aab91988ae155ecc11a74833f5b7",
"revisionTime": "2015-05-05T14:27:37-07:00"
},
{
"path": "github.com/petar/GoLLRB/llrb",
"revision": "53be0d36a84c2a886ca057d34b6aa4468df9ccb4",
@ -32,6 +37,16 @@
"revision": "72aa5da9f7d1125b480b83c6dc5ad09a1f04508c",
"revisionTime": "2014-12-31T15:08:51+01:00"
},
{
"path": "github.com/sqs/s3",
"revision": "ee47412d98d9637046a7096f24809bc45f6bcbd5",
"revisionTime": "2015-02-03T03:00:30-08:00"
},
{
"path": "github.com/sqs/s3/s3util",
"revision": "ee47412d98d9637046a7096f24809bc45f6bcbd5",
"revisionTime": "2015-02-03T03:00:30-08:00"
},
{
"path": "golang.org/x/image/bmp",
"revision": "baddd3465a05d84a6d8d3507547a91cb188c81ea",
@ -47,6 +62,11 @@
"revision": "baddd3465a05d84a6d8d3507547a91cb188c81ea",
"revisionTime": "2015-09-11T13:43:18+10:00"
},
{
"path": "sourcegraph.com/sourcegraph/s3cache",
"revision": "4150cc6b046500fb69804e34aa4a1ca8be361bcb",
"revisionTime": "2014-12-02T11:37:49-08:00"
},
{
"path": "willnorris.com/go/gifresize",
"revision": "9ea3f344c54d0eaa7e9494e3a00bec96676c9bab",