// Copyright 2013 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package proxy provides the image proxy.
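//
// A minimal usage sketch (assuming only the exported API in this package):
// construct a Proxy with NewProxy and register it as an HTTP handler.
//
//	p := proxy.NewProxy(nil) // nil means http.DefaultClient is used
//	http.Handle("/", p)
//	log.Fatal(http.ListenAndServe(":8080", nil))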
package proxy

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/golang/glog"
	"github.com/willnorris/go-imageproxy/cache"
	"github.com/willnorris/go-imageproxy/data"
	"github.com/willnorris/go-imageproxy/transform"
)

// URLError reports a malformed URL error.
type URLError struct {
	Message string
	URL     *url.URL
}

func (e URLError) Error() string {
	return fmt.Sprintf("malformed URL %q: %s", e.URL, e.Message)
}

// NewRequest parses an http.Request into an image request.
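//
// The request path is expected to be either the remote URL itself, or an
// options segment followed by the remote URL, for example (a sketch; the
// exact option syntax is defined by data.ParseOptions):
//
//	/http://example.com/image.jpg
//	/100x200/http://example.com/image.jpg
//
// The query string of the incoming request is always passed through to the
// remote URL.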
func NewRequest(r *http.Request) (*data.Request, error) {
	var err error
	req := new(data.Request)

	path := r.URL.Path[1:] // strip leading slash
	req.URL, err = url.Parse(path)
	if err != nil || !req.URL.IsAbs() {
		// first segment is likely options
		parts := strings.SplitN(path, "/", 2)
		if len(parts) != 2 {
			return nil, URLError{"too few path segments", r.URL}
		}

		req.URL, err = url.Parse(parts[1])
		if err != nil {
			return nil, URLError{fmt.Sprintf("unable to parse remote URL: %v", err), r.URL}
		}

		req.Options = data.ParseOptions(parts[0])
	}

	if !req.URL.IsAbs() {
		return nil, URLError{"must provide absolute remote URL", r.URL}
	}
	if req.URL.Scheme != "http" && req.URL.Scheme != "https" {
		return nil, URLError{"remote URL must have http or https scheme", r.URL}
	}

	// query string is always part of the remote URL
	req.URL.RawQuery = r.URL.RawQuery
	return req, nil
}

// Proxy serves image requests.
type Proxy struct {
	Client *http.Client // client used to fetch remote URLs
	Cache  cache.Cache  // cache used to store fetched images

	// Whitelist specifies a list of remote hosts that images can be
	// proxied from. An empty list means all hosts are allowed.
	Whitelist []string

	// MaxWidth and MaxHeight specify the maximum dimensions, in pixels,
	// honored for resize requests; larger requested values are clamped.
	// A value of 0 means no limit.
	MaxWidth  int
	MaxHeight int
}

// NewProxy constructs a new proxy. The provided http Client will be used to
// fetch remote URLs. If nil is provided, http.DefaultClient will be used.
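// The returned proxy uses cache.NopCache until a different cache.Cache
// implementation is assigned to its Cache field.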
func NewProxy(client *http.Client) *Proxy {
	if client == nil {
		client = http.DefaultClient
	}
	return &Proxy{Client: client, Cache: cache.NopCache}
}

// ServeHTTP handles image requests.
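//
// The request is parsed with NewRequest, requested dimensions are clamped to
// MaxWidth and MaxHeight, and the remote host is checked against the
// whitelist. The image is then served from the cache when possible, fetched
// (or revalidated) from the remote host otherwise, transformed according to
// the request options, and written to the response.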
func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	req, err := NewRequest(r)
	if err != nil {
		glog.Errorf("invalid request URL: %v", err)
		http.Error(w, fmt.Sprintf("invalid request URL: %v", err), http.StatusBadRequest)
		return
	}

	if p.MaxWidth > 0 && int(req.Options.Width) > p.MaxWidth {
		req.Options.Width = float64(p.MaxWidth)
	}
	if p.MaxHeight > 0 && int(req.Options.Height) > p.MaxHeight {
		req.Options.Height = float64(p.MaxHeight)
	}

	u := req.URL.String()
	glog.Infof("request for image: %v", u)

	if !p.allowed(req.URL) {
		glog.Errorf("remote URL is not for an allowed host: %v", req.URL.Host)
		http.Error(w, fmt.Sprintf("remote URL is not for an allowed host: %v", req.URL.Host), http.StatusBadRequest)
		return
	}

	image, ok := p.Cache.Get(u)
	if !ok {
		glog.Infof("image not cached")
		image, err = p.fetchRemoteImage(u, nil)
		if err != nil {
			glog.Errorf("error fetching remote image: %v", err)
			http.Error(w, fmt.Sprintf("error fetching remote image: %v", err), http.StatusInternalServerError)
			return
		}
		p.Cache.Save(image)
	} else if time.Now().After(image.Expires) {
		glog.Infof("cached image expired")
		image, err = p.fetchRemoteImage(u, image)
		if err != nil {
			glog.Errorf("error fetching remote image: %v", err)
			http.Error(w, fmt.Sprintf("error fetching remote image: %v", err), http.StatusInternalServerError)
			return
		}
		p.Cache.Save(image)
	} else {
		glog.Infof("serving from cache")
	}

	image, err = transform.Transform(*image, req.Options)
	if err != nil {
		glog.Errorf("error transforming image: %v", err)
		http.Error(w, "error transforming image", http.StatusInternalServerError)
		return
	}

	w.Header().Add("Content-Length", strconv.Itoa(len(image.Bytes)))
	w.Header().Add("Expires", image.Expires.Format(time.RFC1123))
	w.Write(image.Bytes)
}
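
// fetchRemoteImage fetches the image at u using the proxy's HTTP client. If a
// cached copy with an ETag is provided, it is sent as an If-None-Match header
// so the remote server can reply with 304 Not Modified, in which case the
// cached image is reused with a refreshed expiration time.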
func (p *Proxy) fetchRemoteImage(u string, cached *data.Image) (*data.Image, error) {
	glog.Infof("fetching remote image: %s", u)
	req, err := http.NewRequest("GET", u, nil)
	if err != nil {
		return nil, err
	}

	if cached != nil && cached.Etag != "" {
		// ask the remote server to skip the body if our cached copy is still current
		req.Header.Add("If-None-Match", cached.Etag)
	}

	resp, err := p.Client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNotModified {
		glog.Infof("remote image not modified (304 response)")
		cached.Expires = parseExpires(resp)
		return cached, nil
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("HTTP status not OK: %v", resp.Status)
	}

	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	return &data.Image{
		URL:     u,
		Expires: parseExpires(resp),
		Etag:    resp.Header.Get("Etag"),
		Bytes:   b,
	}, nil
}

// allowed returns whether the specified URL is on the whitelist of remote hosts.
func (p *Proxy) allowed(u *url.URL) bool {
	if len(p.Whitelist) == 0 {
		return true
	}

	for _, host := range p.Whitelist {
		if u.Host == host {
			return true
		}
	}
	return false
}
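
// parseExpires returns the expiration time indicated by the response's
// Expires header. If the header is missing or malformed, the current time is
// returned, so the image is treated as immediately stale.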
func parseExpires(resp *http.Response) time.Time {
	exp := resp.Header.Get("Expires")
	if exp == "" {
		return time.Now()
	}

	t, err := time.Parse(time.RFC1123, exp)
	if err != nil {
		return time.Now()
	}
	return t
}