2013-12-06 20:40:35 -05:00
|
|
|
// Copyright 2013 Google Inc. All rights reserved.
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2013-11-27 14:02:21 -05:00
|
|
|
// Package proxy provides the image proxy.
|
|
|
|
package proxy
|
|
|
|
|
|
|
|
import (
|
2013-12-04 05:55:56 -05:00
|
|
|
"errors"
|
2013-11-27 14:02:21 -05:00
|
|
|
"fmt"
|
2013-12-04 05:55:56 -05:00
|
|
|
"io/ioutil"
|
2013-11-27 14:02:21 -05:00
|
|
|
"net/http"
|
|
|
|
"net/url"
|
2013-12-04 05:55:56 -05:00
|
|
|
"strconv"
|
2013-11-27 14:02:21 -05:00
|
|
|
"strings"
|
2013-12-04 05:55:56 -05:00
|
|
|
"time"
|
2013-12-04 03:37:13 -05:00
|
|
|
|
2013-12-04 05:55:56 -05:00
|
|
|
"github.com/golang/glog"
|
2013-12-25 21:48:10 -05:00
|
|
|
"github.com/gregjones/httpcache"
|
2013-12-04 03:37:13 -05:00
|
|
|
"github.com/willnorris/go-imageproxy/data"
|
2013-12-05 02:42:59 -05:00
|
|
|
"github.com/willnorris/go-imageproxy/transform"
|
2013-11-27 14:02:21 -05:00
|
|
|
)
|
|
|
|
|
|
|
|
// URLError reports a malformed URL error.
|
|
|
|
type URLError struct {
|
|
|
|
Message string
|
|
|
|
URL *url.URL
|
|
|
|
}
|
|
|
|
|
|
|
|
func (e URLError) Error() string {
|
|
|
|
return fmt.Sprintf("malformed URL %q: %s", e.URL, e.Message)
|
|
|
|
}
|
|
|
|
|
|
|
|
// NewRequest parses an http.Request into an image request.
|
2013-12-04 03:37:13 -05:00
|
|
|
func NewRequest(r *http.Request) (*data.Request, error) {
|
2013-11-27 14:02:21 -05:00
|
|
|
var err error
|
2013-12-04 03:37:13 -05:00
|
|
|
req := new(data.Request)
|
2013-11-27 14:02:21 -05:00
|
|
|
|
2013-12-05 11:31:19 -05:00
|
|
|
path := r.URL.Path[1:] // strip leading slash
|
|
|
|
req.URL, err = url.Parse(path)
|
|
|
|
if err != nil || !req.URL.IsAbs() {
|
|
|
|
// first segment is likely options
|
|
|
|
parts := strings.SplitN(path, "/", 2)
|
|
|
|
if len(parts) != 2 {
|
|
|
|
return nil, URLError{"too few path segments", r.URL}
|
|
|
|
}
|
|
|
|
|
|
|
|
req.URL, err = url.Parse(parts[1])
|
|
|
|
if err != nil {
|
|
|
|
return nil, URLError{fmt.Sprintf("unable to parse remote URL: %v", err), r.URL}
|
|
|
|
}
|
|
|
|
|
2013-12-06 14:01:34 -05:00
|
|
|
req.Options = data.ParseOptions(parts[0])
|
2013-11-27 14:02:21 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
if !req.URL.IsAbs() {
|
|
|
|
return nil, URLError{"must provide absolute remote URL", r.URL}
|
|
|
|
}
|
|
|
|
|
|
|
|
if req.URL.Scheme != "http" && req.URL.Scheme != "https" {
|
|
|
|
return nil, URLError{"remote URL must have http or https URL", r.URL}
|
|
|
|
}
|
|
|
|
|
|
|
|
// query string is always part of the remote URL
|
|
|
|
req.URL.RawQuery = r.URL.RawQuery
|
|
|
|
return req, nil
|
|
|
|
}
|
2013-11-27 15:10:29 -05:00
|
|
|
|
|
|
|
// Proxy serves image requests.
type Proxy struct {
	Client *http.Client // client used to fetch remote URLs

	// Cache stores remote image data; NewProxy also wires it into the
	// Client's caching transport.
	Cache Cache

	// Whitelist specifies a list of remote hosts that images can be proxied from. An empty list means all hosts are allowed.
	Whitelist []string

	// MaxWidth and MaxHeight cap the dimensions a client may request;
	// larger requested values are clamped in ServeHTTP. Zero means no
	// limit.
	MaxWidth  int
	MaxHeight int
}
|
|
|
|
|
|
|
|
// NewProxy constructs a new proxy. The provided http Client will be used to
|
|
|
|
// fetch remote URLs. If nil is provided, http.DefaultClient will be used.
|
2013-12-25 21:48:10 -05:00
|
|
|
func NewProxy(client *http.Client, cache Cache) *Proxy {
|
2013-11-27 15:10:29 -05:00
|
|
|
if client == nil {
|
|
|
|
client = http.DefaultClient
|
|
|
|
}
|
2013-12-25 21:48:10 -05:00
|
|
|
if cache == nil {
|
|
|
|
cache = NopCache
|
|
|
|
}
|
|
|
|
|
|
|
|
return &Proxy{
|
|
|
|
Client: &http.Client{
|
|
|
|
Transport: &httpcache.Transport{
|
|
|
|
Transport: client.Transport,
|
|
|
|
Cache: cache,
|
|
|
|
MarkCachedResponses: true,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
Cache: cache,
|
|
|
|
}
|
2013-11-27 15:10:29 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// ServeHTTP handles image requests.
|
|
|
|
func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|
|
|
req, err := NewRequest(r)
|
|
|
|
if err != nil {
|
2013-12-06 20:40:35 -05:00
|
|
|
glog.Errorf("invalid request URL: %v", err)
|
|
|
|
http.Error(w, fmt.Sprintf("invalid request URL: %v", err), http.StatusBadRequest)
|
2013-11-27 15:10:29 -05:00
|
|
|
return
|
|
|
|
}
|
2013-12-04 05:55:56 -05:00
|
|
|
|
2013-12-06 18:03:17 -05:00
|
|
|
if p.MaxWidth > 0 && int(req.Options.Width) > p.MaxWidth {
|
|
|
|
req.Options.Width = float64(p.MaxWidth)
|
2013-12-06 17:17:39 -05:00
|
|
|
}
|
2013-12-06 18:03:17 -05:00
|
|
|
if p.MaxHeight > 0 && int(req.Options.Height) > p.MaxHeight {
|
|
|
|
req.Options.Height = float64(p.MaxHeight)
|
2013-12-06 17:17:39 -05:00
|
|
|
}
|
|
|
|
|
2013-12-04 05:55:56 -05:00
|
|
|
u := req.URL.String()
|
|
|
|
glog.Infof("request for image: %v", u)
|
|
|
|
|
2013-12-04 06:12:56 -05:00
|
|
|
if !p.allowed(req.URL) {
|
2013-12-06 20:40:35 -05:00
|
|
|
glog.Errorf("remote URL is not for an allowed host: %v", req.URL.Host)
|
|
|
|
http.Error(w, fmt.Sprintf("remote URL is not for an allowed host: %v", req.URL.Host), http.StatusBadRequest)
|
2013-12-04 06:12:56 -05:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2013-12-25 21:48:10 -05:00
|
|
|
image, err := p.fetchRemoteImage(u)
|
|
|
|
if err != nil {
|
|
|
|
glog.Errorf("error fetching remote image: %v", err)
|
|
|
|
http.Error(w, fmt.Sprintf("Error fetching remote image: %v", err), http.StatusInternalServerError)
|
|
|
|
return
|
2013-12-04 05:55:56 -05:00
|
|
|
}
|
|
|
|
|
2013-12-26 11:27:07 -05:00
|
|
|
b, _ := transform.Transform(image.Bytes, req.Options)
|
|
|
|
image.Bytes = b
|
2013-12-05 02:42:59 -05:00
|
|
|
|
2013-12-04 05:55:56 -05:00
|
|
|
w.Header().Add("Content-Length", strconv.Itoa(len(image.Bytes)))
|
|
|
|
w.Header().Add("Expires", image.Expires.Format(time.RFC1123))
|
|
|
|
w.Write(image.Bytes)
|
|
|
|
}
|
|
|
|
|
2013-12-25 21:48:10 -05:00
|
|
|
func (p *Proxy) fetchRemoteImage(u string) (*data.Image, error) {
|
|
|
|
resp, err := p.Client.Get(u)
|
2013-11-27 15:10:29 -05:00
|
|
|
if err != nil {
|
2013-12-04 05:55:56 -05:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2013-11-27 15:10:29 -05:00
|
|
|
if resp.StatusCode != http.StatusOK {
|
2013-12-04 05:55:56 -05:00
|
|
|
return nil, errors.New(fmt.Sprintf("HTTP status not OK: %v", resp.Status))
|
2013-11-27 15:10:29 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
defer resp.Body.Close()
|
2013-12-04 05:55:56 -05:00
|
|
|
b, err := ioutil.ReadAll(resp.Body)
|
2013-11-27 15:10:29 -05:00
|
|
|
if err != nil {
|
2013-12-04 05:55:56 -05:00
|
|
|
return nil, err
|
2013-11-27 15:10:29 -05:00
|
|
|
}
|
2013-12-04 05:55:56 -05:00
|
|
|
|
|
|
|
return &data.Image{
|
|
|
|
URL: u,
|
|
|
|
Expires: parseExpires(resp),
|
|
|
|
Etag: resp.Header.Get("Etag"),
|
|
|
|
Bytes: b,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2013-12-04 06:12:56 -05:00
|
|
|
// allowed returns whether the specified URL is on the whitelist of remote hosts.
|
|
|
|
func (p *Proxy) allowed(u *url.URL) bool {
|
|
|
|
if len(p.Whitelist) == 0 {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, host := range p.Whitelist {
|
|
|
|
if u.Host == host {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2013-12-04 05:55:56 -05:00
|
|
|
func parseExpires(resp *http.Response) time.Time {
|
|
|
|
exp := resp.Header.Get("Expires")
|
|
|
|
if exp == "" {
|
|
|
|
return time.Now()
|
|
|
|
}
|
|
|
|
|
|
|
|
t, err := time.Parse(time.RFC1123, exp)
|
|
|
|
if err != nil {
|
|
|
|
return time.Now()
|
|
|
|
}
|
|
|
|
|
|
|
|
return t
|
2013-11-27 15:10:29 -05:00
|
|
|
}
|