// Copyright 2013 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package proxy provides the image proxy.
package proxy

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/golang/glog"
	"github.com/gregjones/httpcache"
	"github.com/willnorris/go-imageproxy/data"
	"github.com/willnorris/go-imageproxy/transform"
)

// URLError reports a malformed URL error.
type URLError struct {
	Message string
	URL     *url.URL
}

func (e URLError) Error() string {
	return fmt.Sprintf("malformed URL %q: %s", e.URL, e.Message)
}

// NewRequest parses an http.Request into an image request.
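//
// The request path must contain the remote image URL, optionally preceded by
// a single options segment that is handed to data.ParseOptions, for example
// (the "100x200" size syntax is only an illustration of what ParseOptions
// might accept):
//
//	/http://example.com/image.jpg
//	/100x200/http://example.com/image.jpg
//
// The query string of the request is always treated as part of the remote URL.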
func NewRequest(r *http.Request) (*data.Request, error) {
	var err error
	req := new(data.Request)

	path := r.URL.Path[1:] // strip leading slash
	req.URL, err = url.Parse(path)
	if err != nil || !req.URL.IsAbs() {
		// first segment is likely options
		parts := strings.SplitN(path, "/", 2)
		if len(parts) != 2 {
			return nil, URLError{"too few path segments", r.URL}
		}
		req.URL, err = url.Parse(parts[1])
		if err != nil {
			return nil, URLError{fmt.Sprintf("unable to parse remote URL: %v", err), r.URL}
		}
		req.Options = data.ParseOptions(parts[0])
	}

	if !req.URL.IsAbs() {
		return nil, URLError{"must provide absolute remote URL", r.URL}
	}
	if req.URL.Scheme != "http" && req.URL.Scheme != "https" {
		return nil, URLError{"remote URL must have http or https scheme", r.URL}
	}

	// query string is always part of the remote URL
	req.URL.RawQuery = r.URL.RawQuery
	return req, nil
}

// Proxy serves image requests.
type Proxy struct {
	Client *http.Client // client used to fetch remote URLs
	Cache  Cache        // cache used to store responses from remote hosts

	// Whitelist specifies a list of remote hosts that images can be proxied
	// from.  An empty list means all hosts are allowed.
	Whitelist []string

	// MaxWidth and MaxHeight are the maximum allowed dimensions, in pixels,
	// of a transformed image.  A value of 0 means no limit.
	MaxWidth  int
	MaxHeight int
}

// NewProxy constructs a new proxy. The provided http Client will be used to
// fetch remote URLs. If nil is provided, http.DefaultClient will be used.
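//
// A minimal usage sketch (the listen address is only an example):
//
//	p := proxy.NewProxy(nil, nil) // http.DefaultClient, no caching
//	http.ListenAndServe("localhost:8080", p)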
func NewProxy(client *http.Client, cache Cache) *Proxy {
	if client == nil {
		client = http.DefaultClient
	}
	if cache == nil {
		cache = NopCache
	}

	return &Proxy{
		// wrap the client's transport with an httpcache.Transport so that
		// responses from remote hosts are cached
		Client: &http.Client{
			Transport: &httpcache.Transport{
				Transport:           client.Transport,
				Cache:               cache,
				MarkCachedResponses: true,
			},
		},
		Cache: cache,
	}
}

// ServeHTTP handles image requests.
func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	req, err := NewRequest(r)
	if err != nil {
		glog.Errorf("invalid request URL: %v", err)
		http.Error(w, fmt.Sprintf("invalid request URL: %v", err), http.StatusBadRequest)
		return
	}

	// clamp requested dimensions to the configured maximums
	if p.MaxWidth > 0 && int(req.Options.Width) > p.MaxWidth {
		req.Options.Width = float64(p.MaxWidth)
	}
	if p.MaxHeight > 0 && int(req.Options.Height) > p.MaxHeight {
		req.Options.Height = float64(p.MaxHeight)
	}

	u := req.URL.String()
	glog.Infof("request for image: %v", u)

	if !p.allowed(req.URL) {
		glog.Errorf("remote URL is not for an allowed host: %v", req.URL.Host)
		http.Error(w, fmt.Sprintf("remote URL is not for an allowed host: %v", req.URL.Host), http.StatusBadRequest)
		return
	}

	image, err := p.fetchRemoteImage(u)
	if err != nil {
		glog.Errorf("error fetching remote image: %v", err)
		http.Error(w, fmt.Sprintf("error fetching remote image: %v", err), http.StatusInternalServerError)
		return
	}

	image, err = transform.Transform(*image, req.Options)
	if err != nil {
		glog.Errorf("error transforming image: %v", err)
		http.Error(w, fmt.Sprintf("error transforming image: %v", err), http.StatusInternalServerError)
		return
	}

	w.Header().Add("Content-Length", strconv.Itoa(len(image.Bytes)))
	w.Header().Add("Expires", image.Expires.Format(time.RFC1123))
	w.Write(image.Bytes)
}
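
// fetchRemoteImage fetches the image at URL u using the proxy's HTTP client
// and returns it as a data.Image.  A non-200 response from the remote host
// is treated as an error.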
func (p *Proxy) fetchRemoteImage(u string) (*data.Image, error) {
	glog.Infof("fetching remote image: %s", u)
	resp, err := p.Client.Get(u)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("HTTP status not OK: %v", resp.Status)
	}

	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	return &data.Image{
		URL:     u,
		Expires: parseExpires(resp),
		Etag:    resp.Header.Get("Etag"),
		Bytes:   b,
	}, nil
}

// allowed returns whether the specified URL is on the whitelist of remote hosts.
func (p *Proxy) allowed(u *url.URL) bool {
	if len(p.Whitelist) == 0 {
		return true
	}

	for _, host := range p.Whitelist {
		if u.Host == host {
			return true
		}
	}

	return false
}
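
// parseExpires returns the expiration time of resp as indicated by its
// Expires header.  If the header is missing or cannot be parsed, the current
// time is returned.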
func parseExpires(resp *http.Response) time.Time {
	exp := resp.Header.Get("Expires")
	if exp == "" {
		return time.Now()
	}

	t, err := time.Parse(time.RFC1123, exp)
	if err != nil {
		return time.Now()
	}

	return t
}