mirror of https://github.com/willnorris/imageproxy.git (synced 2025-04-01 02:42:37 -05:00)

vendor: remove unused packages

parent c751f5b2c9
commit 9654679655

27 changed files with 0 additions and 2182 deletions
vendor/github.com/kr/http/transport/AUTHORS (generated, vendored) | 3 deletions
@@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

vendor/github.com/kr/http/transport/CONTRIBUTORS (generated, vendored) | 3 deletions
@@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

vendor/github.com/kr/http/transport/LICENSE (generated, vendored) | 27 deletions
@@ -1,27 +0,0 @@
Copyright (c) 2009 The transport Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/kr/http/transport/Readme (generated, vendored) | 4 deletions
@@ -1,4 +0,0 @@
package transport contains a generic wrapper for http.RoundTripper

It was shamelessly copied (slightly modified) from
https://github.com/golang/oauth2/blob/95a9f97e5/transport.go

vendor/github.com/kr/http/transport/transport.go (generated, vendored) | 132 deletions
@@ -1,132 +0,0 @@
// Copyright 2014 The transport Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package transport provides a general-purpose wapper for
// http.RoundTripper. It implements the pattern of taking a
// request, modifying a copy, and passing the modified copy to an
// underlying RoundTripper, including bookkeeping necessary to
// cancel in-flight requests.
package transport

import (
	"errors"
	"io"
	"net/http"
	"sync"
)

// Wrapper is an http.RoundTripper that makes HTTP requests,
// wrapping a base RoundTripper and altering every outgoing
// request in some way.
type Wrapper struct {
	// Modify alters the request as needed.
	Modify func(*http.Request) error

	// Base is the base RoundTripper used to make HTTP requests.
	// If nil, http.DefaultTransport is used.
	Base http.RoundTripper

	mu     sync.Mutex                      // guards modReq
	modReq map[*http.Request]*http.Request // original -> modified
}

// RoundTrip provides a copy of req
// to the underlying RoundTripper,
// altered in some way by Modify.
func (t *Wrapper) RoundTrip(req *http.Request) (*http.Response, error) {
	if t.Modify == nil {
		return nil, errors.New("transport: Wrapper's Modify is nil")
	}

	req2 := cloneRequest(req) // per RoundTripper contract
	err := t.Modify(req2)
	if err != nil {
		return nil, err
	}
	t.setModReq(req, req2)
	res, err := t.base().RoundTrip(req2)
	if err != nil {
		t.setModReq(req, nil)
		return nil, err
	}
	res.Body = &onEOFReader{
		rc: res.Body,
		fn: func() { t.setModReq(req, nil) },
	}
	return res, nil
}

// CancelRequest cancels an in-flight request by closing its connection.
func (t *Wrapper) CancelRequest(req *http.Request) {
	type canceler interface {
		CancelRequest(*http.Request)
	}
	if cr, ok := t.base().(canceler); ok {
		t.mu.Lock()
		modReq := t.modReq[req]
		delete(t.modReq, req)
		t.mu.Unlock()
		cr.CancelRequest(modReq)
	}
}

func (t *Wrapper) base() http.RoundTripper {
	if t.Base != nil {
		return t.Base
	}
	return http.DefaultTransport
}

func (t *Wrapper) setModReq(orig, mod *http.Request) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.modReq == nil {
		t.modReq = make(map[*http.Request]*http.Request)
	}
	if mod == nil {
		delete(t.modReq, orig)
	} else {
		t.modReq[orig] = mod
	}
}

// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
	// shallow copy of the struct
	r2 := new(http.Request)
	*r2 = *r
	// deep copy of the Header
	r2.Header = make(http.Header, len(r.Header))
	for k, s := range r.Header {
		r2.Header[k] = append([]string(nil), s...)
	}
	return r2
}

type onEOFReader struct {
	rc io.ReadCloser
	fn func()
}

func (r *onEOFReader) Read(p []byte) (n int, err error) {
	n, err = r.rc.Read(p)
	if err == io.EOF {
		r.runFunc()
	}
	return
}

func (r *onEOFReader) Close() error {
	err := r.rc.Close()
	r.runFunc()
	return err
}

func (r *onEOFReader) runFunc() {
	if fn := r.fn; fn != nil {
		fn()
		r.fn = nil
	}
}

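For context only (not part of this diff), a minimal sketch of how a wrapper like the removed transport.Wrapper was typically consumed: wrapping the default transport so every outgoing request is modified before it is sent. The header name and URL below are placeholders.

	package main

	import (
		"fmt"
		"net/http"

		"github.com/kr/http/transport" // the vendored package removed above
	)

	func main() {
		client := &http.Client{
			Transport: &transport.Wrapper{
				// Modify runs on a clone of each request before it is sent.
				// Base is nil, so http.DefaultTransport performs the request.
				Modify: func(r *http.Request) error {
					r.Header.Set("X-Example-Trace", "demo") // hypothetical header
					return nil
				},
			},
		}
		resp, err := client.Get("https://example.com/") // placeholder URL
		if err != nil {
			fmt.Println("request failed:", err)
			return
		}
		defer resp.Body.Close()
		fmt.Println("status:", resp.Status)
	}
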
vendor/github.com/petar/GoLLRB/LICENSE (generated, vendored) | 27 deletions
@@ -1,27 +0,0 @@
Copyright (c) 2010, Petar Maymounkov
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

(*) Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.

(*) Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.

(*) Neither the name of Petar Maymounkov nor the names of its contributors may be
used to endorse or promote products derived from this software without specific
prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/petar/GoLLRB/README.md (generated, vendored) | 66 deletions
@@ -1,66 +0,0 @@
# GoLLRB

GoLLRB is a Left-Leaning Red-Black (LLRB) implementation of 2-3 balanced binary
search trees in Go Language.

## Overview

As of this writing and to the best of the author's knowledge,
Go still does not have a balanced binary search tree (BBST) data structure.
These data structures are quite useful in a variety of cases. A BBST maintains
elements in sorted order under dynamic updates (inserts and deletes) and can
support various order-specific queries. Furthermore, in practice one often
implements other common data structures like Priority Queues, using BBST's.

2-3 trees (a type of BBST's), as well as the runtime-similar 2-3-4 trees, are
the de facto standard BBST algoritms found in implementations of Python, Java,
and other libraries. The LLRB method of implementing 2-3 trees is a recent
improvement over the traditional implementation. The LLRB approach was
discovered relatively recently (in 2008) by Robert Sedgewick of Princeton
University.

GoLLRB is a Go implementation of LLRB 2-3 trees.

## Maturity

GoLLRB has been used in some pretty heavy-weight machine learning tasks over many gigabytes of data.
I consider it to be in stable, perhaps even production, shape. There are no known bugs.

## Installation

With a healthy Go Language installed, simply run `go get github.com/petar/GoLLRB/llrb`

## Example

	package main

	import (
		"fmt"
		"github.com/petar/GoLLRB/llrb"
	)

	func lessInt(a, b interface{}) bool { return a.(int) < b.(int) }

	func main() {
		tree := llrb.New(lessInt)
		tree.ReplaceOrInsert(1)
		tree.ReplaceOrInsert(2)
		tree.ReplaceOrInsert(3)
		tree.ReplaceOrInsert(4)
		tree.DeleteMin()
		tree.Delete(4)
		c := tree.IterAscend()
		for {
			u := <-c
			if u == nil {
				break
			}
			fmt.Printf("%d\n", int(u.(int)))
		}
	}

## About

GoLLRB was written by [Petar Maymounkov](http://pdos.csail.mit.edu/~petar/).

Follow me on [Twitter @maymounkov](http://www.twitter.com/maymounkov)!

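Note that the README example above uses an older GoLLRB API than the vendored code removed later in this commit: the vendored llrb.New() takes no comparator, items implement the Item interface, and iteration is callback based rather than channel based. A hedged sketch of the same example against the vendored API (using llrb.Int from util.go and AscendGreaterOrEqual from iterator.go, both shown below) might look like this:

	package main

	import (
		"fmt"

		"github.com/petar/GoLLRB/llrb"
	)

	func main() {
		tree := llrb.New() // the vendored API takes no comparator; items implement Less
		tree.ReplaceOrInsert(llrb.Int(1))
		tree.ReplaceOrInsert(llrb.Int(2))
		tree.ReplaceOrInsert(llrb.Int(3))
		tree.ReplaceOrInsert(llrb.Int(4))
		tree.DeleteMin()
		tree.Delete(llrb.Int(4))
		// Callback-based iteration replaces the README's channel-based IterAscend.
		tree.AscendGreaterOrEqual(llrb.Inf(-1), func(i llrb.Item) bool {
			fmt.Println(int(i.(llrb.Int)))
			return true
		})
	}
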
vendor/github.com/petar/GoLLRB/llrb/avgvar.go (generated, vendored) | 39 deletions
@@ -1,39 +0,0 @@
// Copyright 2010 Petar Maymounkov. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package llrb

import "math"

// avgVar maintains the average and variance of a stream of numbers
// in a space-efficient manner.
type avgVar struct {
	count      int64
	sum, sumsq float64
}

func (av *avgVar) Init() {
	av.count = 0
	av.sum = 0.0
	av.sumsq = 0.0
}

func (av *avgVar) Add(sample float64) {
	av.count++
	av.sum += sample
	av.sumsq += sample * sample
}

func (av *avgVar) GetCount() int64 { return av.count }

func (av *avgVar) GetAvg() float64 { return av.sum / float64(av.count) }

func (av *avgVar) GetTotal() float64 { return av.sum }

func (av *avgVar) GetVar() float64 {
	a := av.GetAvg()
	return av.sumsq/float64(av.count) - a*a
}

func (av *avgVar) GetStdDev() float64 { return math.Sqrt(av.GetVar()) }

vendor/github.com/petar/GoLLRB/llrb/iterator.go (generated, vendored) | 93 deletions
@@ -1,93 +0,0 @@
package llrb

type ItemIterator func(i Item) bool

//func (t *Tree) Ascend(iterator ItemIterator) {
//	t.AscendGreaterOrEqual(Inf(-1), iterator)
//}

func (t *LLRB) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
	t.ascendRange(t.root, greaterOrEqual, lessThan, iterator)
}

func (t *LLRB) ascendRange(h *Node, inf, sup Item, iterator ItemIterator) bool {
	if h == nil {
		return true
	}
	if !less(h.Item, sup) {
		return t.ascendRange(h.Left, inf, sup, iterator)
	}
	if less(h.Item, inf) {
		return t.ascendRange(h.Right, inf, sup, iterator)
	}

	if !t.ascendRange(h.Left, inf, sup, iterator) {
		return false
	}
	if !iterator(h.Item) {
		return false
	}
	return t.ascendRange(h.Right, inf, sup, iterator)
}

// AscendGreaterOrEqual will call iterator once for each element greater or equal to
// pivot in ascending order. It will stop whenever the iterator returns false.
func (t *LLRB) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
	t.ascendGreaterOrEqual(t.root, pivot, iterator)
}

func (t *LLRB) ascendGreaterOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
	if h == nil {
		return true
	}
	if !less(h.Item, pivot) {
		if !t.ascendGreaterOrEqual(h.Left, pivot, iterator) {
			return false
		}
		if !iterator(h.Item) {
			return false
		}
	}
	return t.ascendGreaterOrEqual(h.Right, pivot, iterator)
}

func (t *LLRB) AscendLessThan(pivot Item, iterator ItemIterator) {
	t.ascendLessThan(t.root, pivot, iterator)
}

func (t *LLRB) ascendLessThan(h *Node, pivot Item, iterator ItemIterator) bool {
	if h == nil {
		return true
	}
	if !t.ascendLessThan(h.Left, pivot, iterator) {
		return false
	}
	if !iterator(h.Item) {
		return false
	}
	if less(h.Item, pivot) {
		return t.ascendLessThan(h.Left, pivot, iterator)
	}
	return true
}

// DescendLessOrEqual will call iterator once for each element less than the
// pivot in descending order. It will stop whenever the iterator returns false.
func (t *LLRB) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
	t.descendLessOrEqual(t.root, pivot, iterator)
}

func (t *LLRB) descendLessOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
	if h == nil {
		return true
	}
	if less(h.Item, pivot) || !less(pivot, h.Item) {
		if !t.descendLessOrEqual(h.Right, pivot, iterator) {
			return false
		}
		if !iterator(h.Item) {
			return false
		}
	}
	return t.descendLessOrEqual(h.Left, pivot, iterator)
}

vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go (generated, vendored) | 46 deletions
@@ -1,46 +0,0 @@
// Copyright 2010 Petar Maymounkov. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package llrb

// GetHeight() returns an item in the tree with key @key, and it's height in the tree
func (t *LLRB) GetHeight(key Item) (result Item, depth int) {
	return t.getHeight(t.root, key)
}

func (t *LLRB) getHeight(h *Node, item Item) (Item, int) {
	if h == nil {
		return nil, 0
	}
	if less(item, h.Item) {
		result, depth := t.getHeight(h.Left, item)
		return result, depth + 1
	}
	if less(h.Item, item) {
		result, depth := t.getHeight(h.Right, item)
		return result, depth + 1
	}
	return h.Item, 0
}

// HeightStats() returns the average and standard deviation of the height
// of elements in the tree
func (t *LLRB) HeightStats() (avg, stddev float64) {
	av := &avgVar{}
	heightStats(t.root, 0, av)
	return av.GetAvg(), av.GetStdDev()
}

func heightStats(h *Node, d int, av *avgVar) {
	if h == nil {
		return
	}
	av.Add(float64(d))
	if h.Left != nil {
		heightStats(h.Left, d+1, av)
	}
	if h.Right != nil {
		heightStats(h.Right, d+1, av)
	}
}

vendor/github.com/petar/GoLLRB/llrb/llrb.go (generated, vendored) | 456 deletions
@@ -1,456 +0,0 @@
// Copyright 2010 Petar Maymounkov. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// A Left-Leaning Red-Black (LLRB) implementation of 2-3 balanced binary search trees,
// based on the following work:
//
// http://www.cs.princeton.edu/~rs/talks/LLRB/08Penn.pdf
// http://www.cs.princeton.edu/~rs/talks/LLRB/LLRB.pdf
// http://www.cs.princeton.edu/~rs/talks/LLRB/Java/RedBlackBST.java
//
// 2-3 trees (and the run-time equivalent 2-3-4 trees) are the de facto standard BST
// algoritms found in implementations of Python, Java, and other libraries. The LLRB
// implementation of 2-3 trees is a recent improvement on the traditional implementation,
// observed and documented by Robert Sedgewick.
//
package llrb

// Tree is a Left-Leaning Red-Black (LLRB) implementation of 2-3 trees
type LLRB struct {
	count int
	root  *Node
}

type Node struct {
	Item
	Left, Right *Node // Pointers to left and right child nodes
	Black       bool  // If set, the color of the link (incoming from the parent) is black
	// In the LLRB, new nodes are always red, hence the zero-value for node
}

type Item interface {
	Less(than Item) bool
}

//
func less(x, y Item) bool {
	if x == pinf {
		return false
	}
	if x == ninf {
		return true
	}
	return x.Less(y)
}

// Inf returns an Item that is "bigger than" any other item, if sign is positive.
// Otherwise it returns an Item that is "smaller than" any other item.
func Inf(sign int) Item {
	if sign == 0 {
		panic("sign")
	}
	if sign > 0 {
		return pinf
	}
	return ninf
}

var (
	ninf = nInf{}
	pinf = pInf{}
)

type nInf struct{}

func (nInf) Less(Item) bool {
	return true
}

type pInf struct{}

func (pInf) Less(Item) bool {
	return false
}

// New() allocates a new tree
func New() *LLRB {
	return &LLRB{}
}

// SetRoot sets the root node of the tree.
// It is intended to be used by functions that deserialize the tree.
func (t *LLRB) SetRoot(r *Node) {
	t.root = r
}

// Root returns the root node of the tree.
// It is intended to be used by functions that serialize the tree.
func (t *LLRB) Root() *Node {
	return t.root
}

// Len returns the number of nodes in the tree.
func (t *LLRB) Len() int { return t.count }

// Has returns true if the tree contains an element whose order is the same as that of key.
func (t *LLRB) Has(key Item) bool {
	return t.Get(key) != nil
}

// Get retrieves an element from the tree whose order is the same as that of key.
func (t *LLRB) Get(key Item) Item {
	h := t.root
	for h != nil {
		switch {
		case less(key, h.Item):
			h = h.Left
		case less(h.Item, key):
			h = h.Right
		default:
			return h.Item
		}
	}
	return nil
}

// Min returns the minimum element in the tree.
func (t *LLRB) Min() Item {
	h := t.root
	if h == nil {
		return nil
	}
	for h.Left != nil {
		h = h.Left
	}
	return h.Item
}

// Max returns the maximum element in the tree.
func (t *LLRB) Max() Item {
	h := t.root
	if h == nil {
		return nil
	}
	for h.Right != nil {
		h = h.Right
	}
	return h.Item
}

func (t *LLRB) ReplaceOrInsertBulk(items ...Item) {
	for _, i := range items {
		t.ReplaceOrInsert(i)
	}
}

func (t *LLRB) InsertNoReplaceBulk(items ...Item) {
	for _, i := range items {
		t.InsertNoReplace(i)
	}
}

// ReplaceOrInsert inserts item into the tree. If an existing
// element has the same order, it is removed from the tree and returned.
func (t *LLRB) ReplaceOrInsert(item Item) Item {
	if item == nil {
		panic("inserting nil item")
	}
	var replaced Item
	t.root, replaced = t.replaceOrInsert(t.root, item)
	t.root.Black = true
	if replaced == nil {
		t.count++
	}
	return replaced
}

func (t *LLRB) replaceOrInsert(h *Node, item Item) (*Node, Item) {
	if h == nil {
		return newNode(item), nil
	}

	h = walkDownRot23(h)

	var replaced Item
	if less(item, h.Item) { // BUG
		h.Left, replaced = t.replaceOrInsert(h.Left, item)
	} else if less(h.Item, item) {
		h.Right, replaced = t.replaceOrInsert(h.Right, item)
	} else {
		replaced, h.Item = h.Item, item
	}

	h = walkUpRot23(h)

	return h, replaced
}

// InsertNoReplace inserts item into the tree. If an existing
// element has the same order, both elements remain in the tree.
func (t *LLRB) InsertNoReplace(item Item) {
	if item == nil {
		panic("inserting nil item")
	}
	t.root = t.insertNoReplace(t.root, item)
	t.root.Black = true
	t.count++
}

func (t *LLRB) insertNoReplace(h *Node, item Item) *Node {
	if h == nil {
		return newNode(item)
	}

	h = walkDownRot23(h)

	if less(item, h.Item) {
		h.Left = t.insertNoReplace(h.Left, item)
	} else {
		h.Right = t.insertNoReplace(h.Right, item)
	}

	return walkUpRot23(h)
}

// Rotation driver routines for 2-3 algorithm

func walkDownRot23(h *Node) *Node { return h }

func walkUpRot23(h *Node) *Node {
	if isRed(h.Right) && !isRed(h.Left) {
		h = rotateLeft(h)
	}

	if isRed(h.Left) && isRed(h.Left.Left) {
		h = rotateRight(h)
	}

	if isRed(h.Left) && isRed(h.Right) {
		flip(h)
	}

	return h
}

// Rotation driver routines for 2-3-4 algorithm

func walkDownRot234(h *Node) *Node {
	if isRed(h.Left) && isRed(h.Right) {
		flip(h)
	}

	return h
}

func walkUpRot234(h *Node) *Node {
	if isRed(h.Right) && !isRed(h.Left) {
		h = rotateLeft(h)
	}

	if isRed(h.Left) && isRed(h.Left.Left) {
		h = rotateRight(h)
	}

	return h
}

// DeleteMin deletes the minimum element in the tree and returns the
// deleted item or nil otherwise.
func (t *LLRB) DeleteMin() Item {
	var deleted Item
	t.root, deleted = deleteMin(t.root)
	if t.root != nil {
		t.root.Black = true
	}
	if deleted != nil {
		t.count--
	}
	return deleted
}

// deleteMin code for LLRB 2-3 trees
func deleteMin(h *Node) (*Node, Item) {
	if h == nil {
		return nil, nil
	}
	if h.Left == nil {
		return nil, h.Item
	}

	if !isRed(h.Left) && !isRed(h.Left.Left) {
		h = moveRedLeft(h)
	}

	var deleted Item
	h.Left, deleted = deleteMin(h.Left)

	return fixUp(h), deleted
}

// DeleteMax deletes the maximum element in the tree and returns
// the deleted item or nil otherwise
func (t *LLRB) DeleteMax() Item {
	var deleted Item
	t.root, deleted = deleteMax(t.root)
	if t.root != nil {
		t.root.Black = true
	}
	if deleted != nil {
		t.count--
	}
	return deleted
}

func deleteMax(h *Node) (*Node, Item) {
	if h == nil {
		return nil, nil
	}
	if isRed(h.Left) {
		h = rotateRight(h)
	}
	if h.Right == nil {
		return nil, h.Item
	}
	if !isRed(h.Right) && !isRed(h.Right.Left) {
		h = moveRedRight(h)
	}
	var deleted Item
	h.Right, deleted = deleteMax(h.Right)

	return fixUp(h), deleted
}

// Delete deletes an item from the tree whose key equals key.
// The deleted item is return, otherwise nil is returned.
func (t *LLRB) Delete(key Item) Item {
	var deleted Item
	t.root, deleted = t.delete(t.root, key)
	if t.root != nil {
		t.root.Black = true
	}
	if deleted != nil {
		t.count--
	}
	return deleted
}

func (t *LLRB) delete(h *Node, item Item) (*Node, Item) {
	var deleted Item
	if h == nil {
		return nil, nil
	}
	if less(item, h.Item) {
		if h.Left == nil { // item not present. Nothing to delete
			return h, nil
		}
		if !isRed(h.Left) && !isRed(h.Left.Left) {
			h = moveRedLeft(h)
		}
		h.Left, deleted = t.delete(h.Left, item)
	} else {
		if isRed(h.Left) {
			h = rotateRight(h)
		}
		// If @item equals @h.Item and no right children at @h
		if !less(h.Item, item) && h.Right == nil {
			return nil, h.Item
		}
		// PETAR: Added 'h.Right != nil' below
		if h.Right != nil && !isRed(h.Right) && !isRed(h.Right.Left) {
			h = moveRedRight(h)
		}
		// If @item equals @h.Item, and (from above) 'h.Right != nil'
		if !less(h.Item, item) {
			var subDeleted Item
			h.Right, subDeleted = deleteMin(h.Right)
			if subDeleted == nil {
				panic("logic")
			}
			deleted, h.Item = h.Item, subDeleted
		} else { // Else, @item is bigger than @h.Item
			h.Right, deleted = t.delete(h.Right, item)
		}
	}

	return fixUp(h), deleted
}

// Internal node manipulation routines

func newNode(item Item) *Node { return &Node{Item: item} }

func isRed(h *Node) bool {
	if h == nil {
		return false
	}
	return !h.Black
}

func rotateLeft(h *Node) *Node {
	x := h.Right
	if x.Black {
		panic("rotating a black link")
	}
	h.Right = x.Left
	x.Left = h
	x.Black = h.Black
	h.Black = false
	return x
}

func rotateRight(h *Node) *Node {
	x := h.Left
	if x.Black {
		panic("rotating a black link")
	}
	h.Left = x.Right
	x.Right = h
	x.Black = h.Black
	h.Black = false
	return x
}

// REQUIRE: Left and Right children must be present
func flip(h *Node) {
	h.Black = !h.Black
	h.Left.Black = !h.Left.Black
	h.Right.Black = !h.Right.Black
}

// REQUIRE: Left and Right children must be present
func moveRedLeft(h *Node) *Node {
	flip(h)
	if isRed(h.Right.Left) {
		h.Right = rotateRight(h.Right)
		h = rotateLeft(h)
		flip(h)
	}
	return h
}

// REQUIRE: Left and Right children must be present
func moveRedRight(h *Node) *Node {
	flip(h)
	if isRed(h.Left.Left) {
		h = rotateRight(h)
		flip(h)
	}
	return h
}

func fixUp(h *Node) *Node {
	if isRed(h.Right) {
		h = rotateLeft(h)
	}

	if isRed(h.Left) && isRed(h.Left.Left) {
		h = rotateRight(h)
	}

	if isRed(h.Left) && isRed(h.Right) {
		flip(h)
	}

	return h
}

vendor/github.com/petar/GoLLRB/llrb/util.go (generated, vendored) | 17 deletions
@@ -1,17 +0,0 @@
// Copyright 2010 Petar Maymounkov. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package llrb

type Int int

func (x Int) Less(than Item) bool {
	return x < than.(Int)
}

type String string

func (x String) Less(than Item) bool {
	return x < than.(String)
}

vendor/github.com/sqs/s3/License (generated, vendored) | 19 deletions
@@ -1,19 +0,0 @@
Copyright (c) 2012 Keith Rarick.

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/sqs/s3/Readme (generated, vendored) | 4 deletions
@@ -1,4 +0,0 @@
Package s3 signs HTTP requests for use with Amazon’s S3 API.

Documentation:
http://godoc.org/github.com/kr/s3

vendor/github.com/sqs/s3/client.go (generated, vendored) | 28 deletions
@@ -1,28 +0,0 @@
package s3

import (
	"net/http"
	"time"

	"github.com/kr/http/transport"
)

// Client returns an HTTP client that signs all outgoing requests.
// The returned Transport also provides CancelRequest.
// Client is equivalent to DefaultService.Client.
func Client(k Keys) *http.Client {
	return DefaultService.Client(k)
}

// Client returns an HTTP client that signs all outgoing requests.
// The returned Transport also provides CancelRequest.
func (s *Service) Client(k Keys) *http.Client {
	tr := &transport.Wrapper{Modify: func(r *http.Request) error {
		if r.Header.Get("Date") == "" {
			r.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
		}
		s.Sign(r, k)
		return nil
	}}
	return &http.Client{Transport: tr}
}

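For context only, a minimal sketch of how the removed s3.Client helper was typically used to make signed requests; the credentials and object URL below are placeholders, not values from this repository.

	package main

	import (
		"fmt"
		"net/http"

		"github.com/sqs/s3"
	)

	func main() {
		keys := s3.Keys{AccessKey: "AKIA...", SecretKey: "secret"} // placeholder credentials
		client := s3.Client(keys)                                  // every outgoing request is signed

		// Hypothetical object URL; the Date header is added by the wrapper if missing.
		req, _ := http.NewRequest("GET", "https://mybucket.s3.amazonaws.com/key.txt", nil)
		resp, err := client.Do(req)
		if err != nil {
			fmt.Println("request failed:", err)
			return
		}
		defer resp.Body.Close()
		fmt.Println("status:", resp.Status)
	}
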
vendor/github.com/sqs/s3/s3util/Readme (generated, vendored) | 4 deletions
@@ -1,4 +0,0 @@
Package s3util provides streaming transfers to and from Amazon S3.

Full documentation:
http://godoc.org/github.com/kr/s3/s3util

vendor/github.com/sqs/s3/s3util/config.go (generated, vendored) | 28 deletions
@@ -1,28 +0,0 @@
// Package s3util provides streaming transfers to and from Amazon S3.
//
// To use it, open or create an S3 object, read or write data,
// and close the object.
//
// You must assign valid credentials to DefaultConfig.Keys before using
// DefaultConfig. Be sure to close an io.WriteCloser returned by this package,
// to flush buffers and complete the multipart upload process.
package s3util

// TODO(kr): parse error responses; return structured data

import (
	"net/http"

	"github.com/sqs/s3"
)

var DefaultConfig = &Config{
	Service: s3.DefaultService,
	Keys:    new(s3.Keys),
}

type Config struct {
	*s3.Service
	*s3.Keys
	*http.Client // if nil, uses http.DefaultClient
}

vendor/github.com/sqs/s3/s3util/delete.go (generated, vendored) | 32 deletions
@@ -1,32 +0,0 @@
package s3util

import (
	"io"
	"net/http"
	"time"
)

// Delete deletes the S3 object at url. An HTTP status other than 204 (No
// Content) is considered an error.
//
// If c is nil, Delete uses DefaultConfig.
func Delete(url string, c *Config) (io.ReadCloser, error) {
	if c == nil {
		c = DefaultConfig
	}
	r, _ := http.NewRequest("DELETE", url, nil)
	r.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
	c.Sign(r, *c.Keys)
	client := c.Client
	if client == nil {
		client = http.DefaultClient
	}
	resp, err := client.Do(r)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusNoContent {
		return nil, newRespError(resp)
	}
	return resp.Body, nil
}

vendor/github.com/sqs/s3/s3util/error.go (generated, vendored) | 29 deletions
@@ -1,29 +0,0 @@
package s3util

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

type respError struct {
	r *http.Response
	b bytes.Buffer
}

func newRespError(r *http.Response) *respError {
	e := new(respError)
	e.r = r
	io.Copy(&e.b, r.Body)
	r.Body.Close()
	return e
}

func (e *respError) Error() string {
	return fmt.Sprintf(
		"unwanted http status %d: %q",
		e.r.StatusCode,
		e.b.String(),
	)
}

vendor/github.com/sqs/s3/s3util/open.go (generated, vendored) | 33 deletions
@@ -1,33 +0,0 @@
package s3util

import (
	"io"
	"net/http"
	"time"
)

// Open requests the S3 object at url. An HTTP status other than 200 is
// considered an error.
//
// If c is nil, Open uses DefaultConfig.
func Open(url string, c *Config) (io.ReadCloser, error) {
	if c == nil {
		c = DefaultConfig
	}
	// TODO(kr): maybe parallel range fetching
	r, _ := http.NewRequest("GET", url, nil)
	r.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
	c.Sign(r, *c.Keys)
	client := c.Client
	if client == nil {
		client = http.DefaultClient
	}
	resp, err := client.Do(r)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != 200 && resp.StatusCode != http.StatusPartialContent {
		return nil, newRespError(resp)
	}
	return resp.Body, nil
}

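For context only, a minimal sketch of reading an object with the removed s3util.Open; the credentials and URL are placeholders. DefaultConfig embeds *s3.Keys, so its fields can be set directly, per the package docs above.

	package main

	import (
		"io"
		"log"
		"os"

		"github.com/sqs/s3/s3util"
	)

	func main() {
		// DefaultConfig.Keys must hold valid credentials; these are placeholders.
		s3util.DefaultConfig.AccessKey = "AKIA..."
		s3util.DefaultConfig.SecretKey = "secret"

		// Stream a hypothetical object to stdout; nil Config means DefaultConfig.
		rc, err := s3util.Open("https://mybucket.s3.amazonaws.com/key.txt", nil)
		if err != nil {
			log.Fatal(err)
		}
		defer rc.Close()
		io.Copy(os.Stdout, rc)
	}
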
vendor/github.com/sqs/s3/s3util/readdir.go (generated, vendored) | 218 deletions
@@ -1,218 +0,0 @@
package s3util

import (
	"bytes"
	"encoding/xml"
	"errors"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"strings"
	"time"
)

// File represents an S3 object or directory.
type File struct {
	url    string
	prefix string
	config *Config
	result *listObjectsResult
}

type fileInfo struct {
	name    string
	size    int64
	dir     bool
	modTime time.Time
	sys     *Stat
}

// Stat contains information about an S3 object or directory.
// It is the "underlying data source" returned by method Sys
// for each FileInfo produced by this package.
//	fi.Sys().(*s3util.Stat)
// For the meaning of these fields, see
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html.
type Stat struct {
	Key          string
	LastModified string
	ETag         string // ETag value, without double quotes.
	Size         string
	StorageClass string
	OwnerID      string `xml:"Owner>ID"`
	OwnerName    string `xml:"Owner>DisplayName"`
}

type listObjectsResult struct {
	IsTruncated bool
	Contents    []Stat
	Directories []string `xml:"CommonPrefixes>Prefix"` // Suffix "/" trimmed
}

func (f *fileInfo) Name() string { return f.name }
func (f *fileInfo) Size() int64  { return f.size }
func (f *fileInfo) Mode() os.FileMode {
	if f.dir {
		return 0755 | os.ModeDir
	}
	return 0644
}
func (f *fileInfo) ModTime() time.Time {
	if f.modTime.IsZero() && f.sys != nil {
		// we return the zero value if a parse error ever happens.
		f.modTime, _ = time.Parse(time.RFC3339Nano, f.sys.LastModified)
	}
	return f.modTime
}
func (f *fileInfo) IsDir() bool      { return f.dir }
func (f *fileInfo) Sys() interface{} { return f.sys }

// NewFile returns a new File with the given URL and config.
//
// Set rawurl to a directory on S3, such as
// https://mybucket.s3.amazonaws.com/myfolder.
// The URL cannot have query parameters or a fragment.
// If c is nil, DefaultConfig will be used.
func NewFile(rawurl string, c *Config) (*File, error) {
	u, err := url.Parse(rawurl)
	if err != nil {
		return nil, err
	}
	if u.RawQuery != "" {
		return nil, errors.New("url cannot have raw query parameters.")
	}
	if u.Fragment != "" {
		return nil, errors.New("url cannot have a fragment.")
	}

	prefix := strings.TrimLeft(u.Path, "/")
	if prefix != "" && !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}
	u.Path = ""
	return &File{u.String(), prefix, c, nil}, nil
}

// Readdir requests a list of entries in the S3 directory
// represented by f and returns a slice of up to n FileInfo
// values, in alphabetical order. Subsequent calls
// on the same File will yield further FileInfos.
// Only direct children are returned, not deeper descendants.
func (f *File) Readdir(n int) ([]os.FileInfo, error) {
	if f.result != nil && !f.result.IsTruncated {
		return make([]os.FileInfo, 0), io.EOF
	}

	reader, err := f.sendRequest(n)
	if err != nil {
		return nil, err
	}
	defer reader.Close()

	return f.parseResponse(reader)
}

func (f *File) sendRequest(count int) (io.ReadCloser, error) {
	c := f.config
	if c == nil {
		c = DefaultConfig
	}
	var buf bytes.Buffer
	buf.WriteString(f.url)
	buf.WriteString("?delimiter=%2F")
	if f.prefix != "" {
		buf.WriteString("&prefix=")
		buf.WriteString(url.QueryEscape(f.prefix))
	}
	if count > 0 {
		buf.WriteString("&max-keys=")
		buf.WriteString(strconv.Itoa(count))
	}
	if f.result != nil && f.result.IsTruncated {
		var lastDir, lastKey, marker string
		if len(f.result.Directories) > 0 {
			lastDir = f.result.Directories[len(f.result.Directories)-1]
		}
		if len(f.result.Contents) > 0 {
			lastKey = f.result.Contents[len(f.result.Contents)-1].Key
		}

		if lastKey > lastDir {
			marker = lastKey
		} else {
			marker = lastDir
		}

		if marker != "" {
			buf.WriteString("&marker=")
			buf.WriteString(url.QueryEscape(marker))
		}
	}
	u := buf.String()
	r, _ := http.NewRequest("GET", u, nil)
	r.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
	c.Sign(r, *c.Keys)
	client := c.Client
	if client == nil {
		client = http.DefaultClient
	}
	resp, err := client.Do(r)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != 200 {
		return nil, newRespError(resp)
	}
	return resp.Body, nil
}

func (f *File) parseResponse(reader io.Reader) ([]os.FileInfo, error) {
	// Reading it all in now makes the XML decoding way faster.
	bb, err := ioutil.ReadAll(reader)
	if err != nil {
		return nil, err
	}
	reader = bytes.NewReader(bb)

	decoder := xml.NewDecoder(reader)
	result := listObjectsResult{}
	if err := decoder.Decode(&result); err != nil {
		return nil, err
	}

	infos := make([]os.FileInfo, len(result.Contents)+len(result.Directories))
	var size int64
	var name string
	var is_dir bool
	for i, content := range result.Contents {
		c := content
		c.ETag = strings.Trim(c.ETag, `"`)
		size, _ = strconv.ParseInt(c.Size, 10, 0)
		if size == 0 && strings.HasSuffix(c.Key, "/") {
			name = strings.TrimRight(c.Key, "/")
			is_dir = true
		} else {
			name = c.Key
			is_dir = false
		}
		infos[i] = &fileInfo{
			name: name,
			size: size,
			dir:  is_dir,
			sys:  &c,
		}
	}
	for i, dir := range result.Directories {
		infos[len(result.Contents)+i] = &fileInfo{
			name: strings.TrimRight(dir, "/"),
			size: 0,
			dir:  true,
		}
	}
	f.result = &result

	return infos, nil
}

vendor/github.com/sqs/s3/s3util/uploader.go (generated, vendored) | 291 deletions
@@ -1,291 +0,0 @@
package s3util

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"sync"
	"syscall"
	"time"

	"github.com/sqs/s3"
)

// defined by amazon
const (
	minPartSize = 5 * 1024 * 1024
	maxPartSize = 1<<31 - 1 // for 32-bit use; amz max is 5GiB
	maxObjSize  = 5 * 1024 * 1024 * 1024 * 1024
	maxNPart    = 10000
)

const (
	concurrency = 5
	nTry        = 2
)

type part struct {
	r   io.ReadSeeker
	len int64

	// read by xml encoder
	PartNumber int
	ETag       string
}

type uploader struct {
	s3       s3.Service
	keys     s3.Keys
	url      string
	client   *http.Client
	UploadId string // written by xml decoder

	bufsz  int64
	buf    []byte
	off    int
	ch     chan *part
	part   int
	closed bool
	err    error
	wg     sync.WaitGroup

	xml struct {
		XMLName string `xml:"CompleteMultipartUpload"`
		Part    []*part
	}
}

// Create creates an S3 object at url and sends multipart upload requests as
// data is written.
//
// If h is not nil, each of its entries is added to the HTTP request header.
// If c is nil, Create uses DefaultConfig.
func Create(url string, h http.Header, c *Config) (io.WriteCloser, error) {
	if c == nil {
		c = DefaultConfig
	}
	return newUploader(url, h, c)
}

// Sends an S3 multipart upload initiation request.
// See http://docs.amazonwebservices.com/AmazonS3/latest/dev/mpuoverview.html.
// This initial request returns an UploadId that we use to identify
// subsequent PUT requests.
func newUploader(url string, h http.Header, c *Config) (u *uploader, err error) {
	u = new(uploader)
	u.s3 = *c.Service
	u.url = url
	u.keys = *c.Keys
	u.client = c.Client
	if u.client == nil {
		u.client = http.DefaultClient
	}
	u.bufsz = minPartSize
	r, err := http.NewRequest("POST", url+"?uploads", nil)
	if err != nil {
		return nil, err
	}
	r.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
	for k := range h {
		for _, v := range h[k] {
			r.Header.Add(k, v)
		}
	}
	u.s3.Sign(r, u.keys)
	resp, err := u.client.Do(r)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, newRespError(resp)
	}
	err = xml.NewDecoder(resp.Body).Decode(u)
	if err != nil {
		return nil, err
	}
	u.ch = make(chan *part)
	for i := 0; i < concurrency; i++ {
		go u.worker()
	}
	return u, nil
}

func (u *uploader) Write(p []byte) (n int, err error) {
	if u.closed {
		return 0, syscall.EINVAL
	}
	if u.err != nil {
		return 0, u.err
	}
	for n < len(p) {
		if cap(u.buf) == 0 {
			u.buf = make([]byte, int(u.bufsz))
			// Increase part size (1.001x).
			// This lets us reach the max object size (5TiB) while
			// still doing minimal buffering for small objects.
			u.bufsz = min(u.bufsz+u.bufsz/1000, maxPartSize)
		}
		r := copy(u.buf[u.off:], p[n:])
		u.off += r
		n += r
		if u.off == len(u.buf) {
			u.flush()
		}
	}
	return n, nil
}

func (u *uploader) flush() {
	u.wg.Add(1)
	u.part++
	p := &part{bytes.NewReader(u.buf[:u.off]), int64(u.off), u.part, ""}
	u.xml.Part = append(u.xml.Part, p)
	u.ch <- p
	u.buf, u.off = nil, 0
}

func (u *uploader) worker() {
	for p := range u.ch {
		u.retryUploadPart(p)
	}
}

// Calls putPart up to nTry times to recover from transient errors.
func (u *uploader) retryUploadPart(p *part) {
	defer u.wg.Done()
	defer func() { p.r = nil }() // free the large buffer
	var err error
	for i := 0; i < nTry; i++ {
		p.r.Seek(0, 0)
		err = u.putPart(p)
		if err == nil {
			return
		}
	}
	u.err = err
}

// Uploads part p, reading its contents from p.r.
// Stores the ETag in p.ETag.
func (u *uploader) putPart(p *part) error {
	v := url.Values{}
	v.Set("partNumber", strconv.Itoa(p.PartNumber))
	v.Set("uploadId", u.UploadId)
	req, err := http.NewRequest("PUT", u.url+"?"+v.Encode(), p.r)
	if err != nil {
		return err
	}
	req.ContentLength = p.len
	req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
	u.s3.Sign(req, u.keys)
	resp, err := u.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return newRespError(resp)
	}
	s := resp.Header.Get("etag") // includes quote chars for some reason
	if len(s) < 2 {
		return fmt.Errorf("received invalid etag %q", s)
	}
	p.ETag = s[1 : len(s)-1]
	return nil
}

func (u *uploader) Close() error {
	if u.closed {
		return syscall.EINVAL
	}
	if cap(u.buf) > 0 {
		u.flush()
	}
	u.wg.Wait()
	close(u.ch)
	u.closed = true
	if u.err != nil {
		u.abort()
		return u.err
	}

	if u.part == 0 {
		// Can't upload an empty file with multipart uploads.
		u.abort()
		if u.err != nil {
			return u.err
		}
		req, err := http.NewRequest("PUT", u.url, bytes.NewReader(nil))
		if err != nil {
			return err
		}
		req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
		u.s3.Sign(req, u.keys)
		resp, err := u.client.Do(req)
		if err != nil {
			return err
		}
		if resp.StatusCode != 200 {
			return newRespError(resp)
		}
		resp.Body.Close()
		return nil
	}

	body, err := xml.Marshal(u.xml)
	if err != nil {
		return err
	}
	b := bytes.NewBuffer(body)
	v := url.Values{}
	v.Set("uploadId", u.UploadId)
	req, err := http.NewRequest("POST", u.url+"?"+v.Encode(), b)
	if err != nil {
		return err
	}
	req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
	u.s3.Sign(req, u.keys)
	resp, err := u.client.Do(req)
	if err != nil {
		return err
	}
	if resp.StatusCode != 200 {
		return newRespError(resp)
	}
	resp.Body.Close()
	return nil
}

func (u *uploader) abort() {
	// TODO(kr): devise a reasonable way to report an error here in addition
	// to the error that caused the abort.
	v := url.Values{}
	v.Set("uploadId", u.UploadId)
	s := u.url + "?" + v.Encode()
	req, err := http.NewRequest("DELETE", s, nil)
	if err != nil {
		return
	}
	req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
	u.s3.Sign(req, u.keys)
	resp, err := u.client.Do(req)
	if err != nil {
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return
	}
}

func min(a, b int64) int64 {
	if a < b {
		return a
	}
	return b
}

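For context only, a minimal sketch of streaming an upload through the removed s3util.Create; the credentials, file name, and URL are placeholders. Per the package docs above, the returned WriteCloser must be closed to complete the multipart upload, so the error from Close matters.

	package main

	import (
		"io"
		"log"
		"os"

		"github.com/sqs/s3/s3util"
	)

	func main() {
		s3util.DefaultConfig.AccessKey = "AKIA..." // placeholder credentials
		s3util.DefaultConfig.SecretKey = "secret"

		// Stream a local file into a hypothetical S3 URL.
		src, err := os.Open("local.bin")
		if err != nil {
			log.Fatal(err)
		}
		defer src.Close()

		dst, err := s3util.Create("https://mybucket.s3.amazonaws.com/local.bin", nil, nil)
		if err != nil {
			log.Fatal(err)
		}
		if _, err := io.Copy(dst, src); err != nil {
			log.Fatal(err)
		}
		// Close completes (or aborts) the multipart upload.
		if err := dst.Close(); err != nil {
			log.Fatal(err)
		}
	}
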
200
vendor/github.com/sqs/s3/sign.go
generated
vendored
200
vendor/github.com/sqs/s3/sign.go
generated
vendored
|
@ -1,200 +0,0 @@
|
|||
// Package s3 signs HTTP requests for Amazon S3 and compatible services.
|
||||
package s3
|
||||
|
||||
// See
|
||||
// http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/RESTAuthentication.html.
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var signParams = map[string]bool{
|
||||
"acl": true,
|
||||
"delete": true,
|
||||
"lifecycle": true,
|
||||
"location": true,
|
||||
"logging": true,
|
||||
"notification": true,
|
||||
"partNumber": true,
|
||||
"policy": true,
|
||||
"requestPayment": true,
|
||||
"response-cache-control": true,
|
||||
"response-content-disposition": true,
|
||||
"response-content-encoding": true,
|
||||
"response-content-language": true,
|
||||
"response-content-type": true,
|
||||
"response-expires": true,
|
||||
"restore": true,
|
||||
"torrent": true,
|
||||
"uploadId": true,
|
||||
"uploads": true,
|
||||
"versionId": true,
|
||||
"versioning": true,
|
||||
"versions": true,
|
||||
"website": true,
|
||||
}
|
||||
|
||||
// Keys holds a set of Amazon Security Credentials.
|
||||
type Keys struct {
|
||||
AccessKey string
|
||||
SecretKey string
|
||||
|
||||
// SecurityToken is used for temporary security credentials.
|
||||
// If set, it will be added to header field X-Amz-Security-Token
|
||||
// before signing a request.
|
||||
SecurityToken string
|
||||
// See http://docs.aws.amazon.com/AmazonS3/latest/dev/MakingRequests.html#TypesofSecurityCredentials
|
||||
}
|
||||
|
||||
// IdentityBucket returns subdomain.
|
||||
// It is designed to be used with S3-compatible services that
|
||||
// treat the entire subdomain as the bucket name, for example
|
||||
// storage.io.
|
||||
func IdentityBucket(subdomain string) string {
|
||||
return subdomain
|
||||
}
|
||||
|
||||
// AmazonBucket returns everything up to the last '.' in subdomain.
|
||||
// It is designed to be used with the Amazon service.
|
||||
// "johnsmith.s3" becomes "johnsmith"
|
||||
// "johnsmith.s3-eu-west-1" becomes "johnsmith"
|
||||
// "www.example.com.s3" becomes "www.example.com"
|
||||
func AmazonBucket(subdomain string) string {
|
||||
if i := strings.LastIndex(subdomain, "."); i != -1 {
|
||||
return subdomain[:i]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// DefaultService is the default Service used by Sign.
|
||||
var DefaultService = &Service{Domain: "amazonaws.com"}
|
||||
|
||||
// Sign signs an HTTP request with the given S3 keys.
|
||||
//
|
||||
// This function is a wrapper around DefaultService.Sign.
|
||||
func Sign(r *http.Request, k Keys) {
|
||||
DefaultService.Sign(r, k)
|
||||
}
|
||||
|
||||
// Service represents an S3-compatible service.
|
||||
type Service struct {
|
||||
// Domain is the service's root domain. It is used to extract
|
||||
// the subdomain from an http.Request before passing the
|
||||
// subdomain to Bucket.
|
||||
Domain string
|
||||
|
||||
// Bucket derives the bucket name from a subdomain.
|
||||
// If nil, AmazonBucket is used.
|
||||
Bucket func(subdomain string) string
|
||||
}
|
||||
|
||||
// Sign signs an HTTP request with the given S3 keys for use on service s.
|
||||
func (s *Service) Sign(r *http.Request, k Keys) {
|
||||
if k.SecurityToken != "" {
|
||||
r.Header.Set("X-Amz-Security-Token", k.SecurityToken)
|
||||
}
|
||||
h := hmac.New(sha1.New, []byte(k.SecretKey))
|
||||
s.writeSigData(h, r)
|
||||
sig := make([]byte, base64.StdEncoding.EncodedLen(h.Size()))
|
||||
base64.StdEncoding.Encode(sig, h.Sum(nil))
|
||||
r.Header.Set("Authorization", "AWS "+k.AccessKey+":"+string(sig))
|
||||
}
|
||||
|
||||
func (s *Service) writeSigData(w io.Writer, r *http.Request) {
|
||||
w.Write([]byte(r.Method))
|
||||
w.Write([]byte{'\n'})
|
||||
w.Write([]byte(r.Header.Get("content-md5")))
|
||||
w.Write([]byte{'\n'})
|
||||
w.Write([]byte(r.Header.Get("content-type")))
|
||||
w.Write([]byte{'\n'})
|
||||
if _, ok := r.Header["X-Amz-Date"]; !ok {
|
||||
w.Write([]byte(r.Header.Get("date")))
|
||||
}
|
||||
w.Write([]byte{'\n'})
|
||||
writeAmzHeaders(w, r)
|
||||
s.writeResource(w, r)
|
||||
}
|
||||
|
||||
func (s *Service) writeResource(w io.Writer, r *http.Request) {
|
||||
s.writeVhostBucket(w, strings.ToLower(r.Host))
|
||||
path := r.URL.RequestURI()
|
||||
if r.URL.RawQuery != "" {
|
||||
path = path[:len(path)-len(r.URL.RawQuery)-1]
|
||||
}
|
||||
w.Write([]byte(path))
|
||||
s.writeSubResource(w, r)
|
||||
}
|
||||
|
||||
func (s *Service) writeVhostBucket(w io.Writer, host string) {
|
||||
if i := strings.Index(host, ":"); i != -1 {
|
||||
host = host[:i]
|
||||
}
|
||||
|
||||
if host == s.Domain {
|
||||
// no vhost - do nothing
|
||||
} else if strings.HasSuffix(host, "."+s.Domain) {
|
||||
// vhost - bucket may be in prefix
|
||||
b := s.Bucket
|
||||
if b == nil {
|
||||
b = AmazonBucket
|
||||
}
|
||||
bucket := b(host[:len(host)-len(s.Domain)-1])
|
||||
|
||||
if bucket != "" {
|
||||
w.Write([]byte{'/'})
|
||||
w.Write([]byte(bucket))
|
||||
}
|
||||
} else {
|
||||
// cname - bucket is host
|
||||
w.Write([]byte{'/'})
|
||||
w.Write([]byte(host))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) writeSubResource(w io.Writer, r *http.Request) {
|
||||
var a []string
|
||||
for k, vs := range r.URL.Query() {
|
||||
if signParams[k] {
|
||||
for _, v := range vs {
|
||||
if v == "" {
|
||||
a = append(a, k)
|
||||
} else {
|
||||
a = append(a, k+"="+v)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
sort.Strings(a)
|
||||
var p byte = '?'
|
||||
for _, s := range a {
|
||||
w.Write([]byte{p})
|
||||
w.Write([]byte(s))
|
||||
p = '&'
|
||||
}
|
||||
}
|
||||
|
||||
func writeAmzHeaders(w io.Writer, r *http.Request) {
|
||||
var keys []string
|
||||
for k, _ := range r.Header {
|
||||
if strings.HasPrefix(strings.ToLower(k), "x-amz-") {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(keys)
|
||||
var a []string
|
||||
for _, k := range keys {
|
||||
v := r.Header[k]
|
||||
a = append(a, strings.ToLower(k)+":"+strings.Join(v, ","))
|
||||
}
|
||||
for _, h := range a {
|
||||
w.Write([]byte(h))
|
||||
w.Write([]byte{'\n'})
|
||||
}
|
||||
}
|
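For orientation, a minimal usage sketch of the signing API shown above. Sign, DefaultService, Keys (with AccessKey and SecretKey fields), and AmazonBucket all appear in the removed package; the request URL, Date value, and credentials below are illustrative placeholders, not values taken from the source.

// Sketch only: sign a GET request for an object in a virtual-hosted bucket.
// Assumes the package's import path github.com/sqs/s3, as listed in vendor.json.
package main

import (
	"fmt"
	"net/http"

	"github.com/sqs/s3"
)

func main() {
	keys := s3.Keys{
		AccessKey: "AKIAEXAMPLE", // placeholder credential
		SecretKey: "secret",      // placeholder credential
	}

	req, _ := http.NewRequest("GET", "https://mybucket.s3.amazonaws.com/photos/cat.jpg", nil)
	req.Header.Set("Date", "Mon, 02 Jan 2006 15:04:05 GMT") // used in the signature unless X-Amz-Date is set

	s3.Sign(req, keys) // sets "Authorization: AWS <AccessKey>:<signature>"
	fmt.Println(req.Header.Get("Authorization"))

	// Bucket-name extraction used when building the canonical resource:
	fmt.Println(s3.AmazonBucket("johnsmith.s3-eu-west-1")) // "johnsmith"
}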
194
vendor/golang.org/x/image/webp/nycbcra/nycbcra.go
generated
vendored
@@ -1,194 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package nycbcra provides non-alpha-premultiplied Y'CbCr-with-alpha image and
// color types.
//
// Deprecated: as of Go 1.6. Use the standard image and image/color packages
// instead.
package nycbcra // import "golang.org/x/image/webp/nycbcra"

import (
	"image"
	"image/color"
)

func init() {
	println("The golang.org/x/image/webp/nycbcra package is deprecated, as of Go 1.6. " +
		"Use the standard image and image/color packages instead.")
}

// TODO: move this to the standard image and image/color packages, so that the
// image/draw package can have fast-path code. Moving would rename:
//	nycbcra.Color      to color.NYCbCrA
//	nycbcra.ColorModel to color.NYCbCrAModel
//	nycbcra.Image      to image.NYCbCrA

// Color represents a non-alpha-premultiplied Y'CbCr-with-alpha color, having
// 8 bits each for one luma, two chroma and one alpha component.
type Color struct {
	color.YCbCr
	A uint8
}

func (c Color) RGBA() (r, g, b, a uint32) {
	r8, g8, b8 := color.YCbCrToRGB(c.Y, c.Cb, c.Cr)
	a = uint32(c.A) * 0x101
	r = uint32(r8) * 0x101 * a / 0xffff
	g = uint32(g8) * 0x101 * a / 0xffff
	b = uint32(b8) * 0x101 * a / 0xffff
	return
}

// ColorModel is the Model for non-alpha-premultiplied Y'CbCr-with-alpha colors.
var ColorModel color.Model = color.ModelFunc(nYCbCrAModel)

func nYCbCrAModel(c color.Color) color.Color {
	switch c := c.(type) {
	case Color:
		return c
	case color.YCbCr:
		return Color{c, 0xff}
	}
	r, g, b, a := c.RGBA()

	// Convert from alpha-premultiplied to non-alpha-premultiplied.
	if a != 0 {
		r = (r * 0xffff) / a
		g = (g * 0xffff) / a
		b = (b * 0xffff) / a
	}

	y, u, v := color.RGBToYCbCr(uint8(r>>8), uint8(g>>8), uint8(b>>8))
	return Color{color.YCbCr{Y: y, Cb: u, Cr: v}, uint8(a >> 8)}
}

// Image is an in-memory image of non-alpha-premultiplied Y'CbCr-with-alpha
// colors. A and AStride are analogous to the Y and YStride fields of the
// embedded YCbCr.
type Image struct {
	image.YCbCr
	A       []uint8
	AStride int
}

func (p *Image) ColorModel() color.Model {
	return ColorModel
}

func (p *Image) At(x, y int) color.Color {
	return p.NYCbCrAAt(x, y)
}

func (p *Image) NYCbCrAAt(x, y int) Color {
	if !(image.Point{X: x, Y: y}.In(p.Rect)) {
		return Color{}
	}
	yi := p.YOffset(x, y)
	ci := p.COffset(x, y)
	ai := p.AOffset(x, y)
	return Color{
		color.YCbCr{
			Y:  p.Y[yi],
			Cb: p.Cb[ci],
			Cr: p.Cr[ci],
		},
		p.A[ai],
	}
}

// AOffset returns the index of the first element of A that corresponds to
// the pixel at (x, y).
func (p *Image) AOffset(x, y int) int {
	return (y-p.Rect.Min.Y)*p.AStride + (x - p.Rect.Min.X)
}

// SubImage returns an image representing the portion of the image p visible
// through r. The returned value shares pixels with the original image.
func (p *Image) SubImage(r image.Rectangle) image.Image {
	// TODO: share code with image.NewYCbCr when this type moves into the
	// standard image package.
	r = r.Intersect(p.Rect)
	// If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside
	// either r1 or r2 if the intersection is empty. Without explicitly checking for
	// this, the Pix[i:] expression below can panic.
	if r.Empty() {
		return &Image{
			YCbCr: image.YCbCr{
				SubsampleRatio: p.SubsampleRatio,
			},
		}
	}
	yi := p.YOffset(r.Min.X, r.Min.Y)
	ci := p.COffset(r.Min.X, r.Min.Y)
	ai := p.AOffset(r.Min.X, r.Min.Y)
	return &Image{
		YCbCr: image.YCbCr{
			Y:              p.Y[yi:],
			Cb:             p.Cb[ci:],
			Cr:             p.Cr[ci:],
			SubsampleRatio: p.SubsampleRatio,
			YStride:        p.YStride,
			CStride:        p.CStride,
			Rect:           r,
		},
		A:       p.A[ai:],
		AStride: p.AStride,
	}
}

// Opaque scans the entire image and reports whether it is fully opaque.
func (p *Image) Opaque() bool {
	if p.Rect.Empty() {
		return true
	}
	i0, i1 := 0, p.Rect.Dx()
	for y := p.Rect.Min.Y; y < p.Rect.Max.Y; y++ {
		for _, a := range p.A[i0:i1] {
			if a != 0xff {
				return false
			}
		}
		i0 += p.AStride
		i1 += p.AStride
	}
	return true
}

// New returns a new Image with the given bounds and subsample ratio.
func New(r image.Rectangle, subsampleRatio image.YCbCrSubsampleRatio) *Image {
	// TODO: share code with image.NewYCbCr when this type moves into the
	// standard image package.
	w, h, cw, ch := r.Dx(), r.Dy(), 0, 0
	switch subsampleRatio {
	case image.YCbCrSubsampleRatio422:
		cw = (r.Max.X+1)/2 - r.Min.X/2
		ch = h
	case image.YCbCrSubsampleRatio420:
		cw = (r.Max.X+1)/2 - r.Min.X/2
		ch = (r.Max.Y+1)/2 - r.Min.Y/2
	case image.YCbCrSubsampleRatio440:
		cw = w
		ch = (r.Max.Y+1)/2 - r.Min.Y/2
	default:
		// Default to 4:4:4 subsampling.
		cw = w
		ch = h
	}
	b := make([]byte, 2*w*h+2*cw*ch)
	// TODO: use s[i:j:k] notation to set the cap.
	return &Image{
		YCbCr: image.YCbCr{
			Y:              b[:w*h],
			Cb:             b[w*h+0*cw*ch : w*h+1*cw*ch],
			Cr:             b[w*h+1*cw*ch : w*h+2*cw*ch],
			SubsampleRatio: subsampleRatio,
			YStride:        w,
			CStride:        cw,
			Rect:           r,
		},
		A:       b[w*h+2*cw*ch:],
		AStride: w,
	}
}
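A brief usage sketch of the package above, for orientation. New, AOffset, NYCbCrAAt, and Opaque are defined in the removed file; the image bounds, subsample ratio, and the single alpha write are illustrative assumptions.

// Sketch only: allocate a small non-premultiplied Y'CbCr+alpha image and read a pixel back.
package main

import (
	"fmt"
	"image"

	"golang.org/x/image/webp/nycbcra"
)

func main() {
	// 4x2 image with 4:2:0 chroma subsampling; all planes start zeroed.
	img := nycbcra.New(image.Rect(0, 0, 4, 2), image.YCbCrSubsampleRatio420)

	// Mark the pixel at (1, 0) fully opaque; every other alpha value stays 0.
	img.A[img.AOffset(1, 0)] = 0xff

	c := img.NYCbCrAAt(1, 0)
	fmt.Println(c.A, img.Opaque()) // 255 false
}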
26
vendor/sourcegraph.com/sourcegraph/s3cache/LICENSE
generated
vendored
@@ -1,26 +0,0 @@
Copyright (c) 2013 The s3cache AUTHORS. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of Sourcegraph Inc. nor the names of its contributors may
  be used to endorse or promote products derived from this software without
  specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
127
vendor/sourcegraph.com/sourcegraph/s3cache/s3cache.go
generated
vendored
@@ -1,127 +0,0 @@
// Package s3cache provides an implementation of httpcache.Cache that stores and
// retrieves data using Amazon S3.
package s3cache // import "sourcegraph.com/sourcegraph/s3cache"

import (
	"compress/gzip"
	"crypto/md5"
	"encoding/hex"
	"io"
	"io/ioutil"
	"log"
	"os"
	"strconv"
	"strings"

	"github.com/sqs/s3"
	"github.com/sqs/s3/s3util"
)

// Cache objects store and retrieve data using Amazon S3.
type Cache struct {
	// Config is the Amazon S3 configuration.
	Config s3util.Config

	// BucketURL is the URL to the bucket on Amazon S3, which includes the
	// bucket name and the AWS region. Example:
	// "https://s3-us-west-2.amazonaws.com/mybucket".
	BucketURL string

	// Gzip indicates whether cache entries should be gzipped in Set and
	// gunzipped in Get. If true, cache entry keys will have the suffix ".gz"
	// appended.
	Gzip bool
}

var noLogErrors, _ = strconv.ParseBool(os.Getenv("NO_LOG_S3CACHE_ERRORS"))

func (c *Cache) Get(key string) (resp []byte, ok bool) {
	rdr, err := s3util.Open(c.url(key), &c.Config)
	if err != nil {
		return []byte{}, false
	}
	defer rdr.Close()
	if c.Gzip {
		rdr, err = gzip.NewReader(rdr)
		if err != nil {
			return nil, false
		}
		defer rdr.Close()
	}
	resp, err = ioutil.ReadAll(rdr)
	if err != nil {
		if !noLogErrors {
			log.Printf("s3cache.Get failed: %s", err)
		}
	}
	return resp, err == nil
}

func (c *Cache) Set(key string, resp []byte) {
	w, err := s3util.Create(c.url(key), nil, &c.Config)
	if err != nil {
		if !noLogErrors {
			log.Printf("s3util.Create failed: %s", err)
		}
		return
	}
	defer w.Close()
	if c.Gzip {
		w = gzip.NewWriter(w)
		defer w.Close()
	}
	_, err = w.Write(resp)
	if err != nil {
		if !noLogErrors {
			log.Printf("s3cache.Set failed: %s", err)
		}
	}
}

func (c *Cache) Delete(key string) {
	rdr, err := s3util.Delete(c.url(key), &c.Config)
	if err != nil {
		if !noLogErrors {
			log.Printf("s3cache.Delete failed: %s", err)
		}
		return
	}
	defer rdr.Close()
}

func (c *Cache) url(key string) string {
	key = cacheKeyToObjectKey(key)
	if c.Gzip {
		key += ".gz"
	}
	if strings.HasSuffix(c.BucketURL, "/") {
		return c.BucketURL + key
	}
	return c.BucketURL + "/" + key
}

func cacheKeyToObjectKey(key string) string {
	h := md5.New()
	io.WriteString(h, key)
	return hex.EncodeToString(h.Sum(nil))
}

// New returns a new Cache with underlying storage in Amazon S3. The bucketURL
// is the full URL to the bucket on Amazon S3, including the bucket name and AWS
// region (e.g., "https://s3-us-west-2.amazonaws.com/mybucket").
//
// The environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_KEY are used as the AWS
// credentials. To use different credentials, modify the returned Cache object
// or construct a Cache object manually.
func New(bucketURL string) *Cache {
	return &Cache{
		Config: s3util.Config{
			Keys: &s3.Keys{
				AccessKey: os.Getenv("AWS_ACCESS_KEY_ID"),
				SecretKey: os.Getenv("AWS_SECRET_KEY"),
			},
			Service: s3.DefaultService,
		},
		BucketURL: bucketURL,
	}
}
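A minimal sketch of how the cache above was meant to be used. New, Get, Set, and the Gzip field come from the code shown; the bucket URL, cache key, and credentials are placeholders. Per its doc comment the type implements httpcache.Cache, so in practice it would usually be handed to an httpcache transport rather than called directly as below.

// Sketch only: store and fetch one entry through the S3-backed cache.
package main

import (
	"fmt"
	"os"

	"sourcegraph.com/sourcegraph/s3cache"
)

func main() {
	// New reads credentials from AWS_ACCESS_KEY_ID / AWS_SECRET_KEY.
	os.Setenv("AWS_ACCESS_KEY_ID", "AKIAEXAMPLE") // placeholder credential
	os.Setenv("AWS_SECRET_KEY", "secret")         // placeholder credential

	c := s3cache.New("https://s3-us-west-2.amazonaws.com/mybucket")
	c.Gzip = true // entries are stored gzipped, with a ".gz" object suffix

	c.Set("http://example.com/image.png", []byte("cached response bytes"))
	if b, ok := c.Get("http://example.com/image.png"); ok {
		fmt.Printf("cache hit: %d bytes\n", len(b))
	}
}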
36
vendor/vendor.json
vendored
@@ -340,18 +340,6 @@
"revision": "12b6a0f7b3e676d459a9480e75df7efe576cfcb2",
|
||||
"revisionTime": "2017-09-08T20:30:58Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "Th+zE6hHI4jpczGj+JNsqDJrJgI=",
|
||||
"path": "github.com/kr/http/transport",
|
||||
"revision": "77bd98b60462aab91988ae155ecc11a74833f5b7",
|
||||
"revisionTime": "2015-05-05T21:27:37Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "Hc7LMQrHR71kkSsd/P/SBdiC258=",
|
||||
"path": "github.com/petar/GoLLRB/llrb",
|
||||
"revision": "53be0d36a84c2a886ca057d34b6aa4468df9ccb4",
|
||||
"revisionTime": "2013-04-27T21:51:48Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "GfnXm54E98jxQJMXPZz0LbPVaRc=",
|
||||
"path": "github.com/peterbourgon/diskv",
|
||||
|
@ -376,18 +364,6 @@
|
|||
"revision": "5bf94b69c6b68ee1b541973bb8e1144db23a194b",
|
||||
"revisionTime": "2017-03-21T23:07:31Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "keGfp7Lfr4cPUiZjLpHzZWrHEzM=",
|
||||
"path": "github.com/sqs/s3",
|
||||
"revision": "ee47412d98d9637046a7096f24809bc45f6bcbd5",
|
||||
"revisionTime": "2015-02-03T11:00:30Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "ki3xti8X7mLGmAWZoYZiBAJYQ8M=",
|
||||
"path": "github.com/sqs/s3/s3util",
|
||||
"revision": "ee47412d98d9637046a7096f24809bc45f6bcbd5",
|
||||
"revisionTime": "2015-02-03T11:00:30Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "UD/pejajPyS7WaWVXq2NU1eK4Ic=",
|
||||
"path": "golang.org/x/image/bmp",
|
||||
|
@ -430,12 +406,6 @@
|
|||
"revision": "426cfd8eeb6e08ab1932954e09e3c2cb2bc6e36d",
|
||||
"revisionTime": "2017-05-14T06:33:48Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "Q+QuePyosoyKLP7tHNe3iREV+mc=",
|
||||
"path": "golang.org/x/image/webp/nycbcra",
|
||||
"revision": "426cfd8eeb6e08ab1932954e09e3c2cb2bc6e36d",
|
||||
"revisionTime": "2017-05-14T06:33:48Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "dr5+PfIRzXeN+l1VG+s0lea9qz8=",
|
||||
"path": "golang.org/x/net/context",
|
||||
|
@ -712,12 +682,6 @@
|
|||
"revision": "8233e124e4634a8313f2c9a1ea7d4db33546b11b",
|
||||
"revisionTime": "2017-08-31T17:59:09Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "Oz2aQiusOZOpefTB6nCKW+vzrWA=",
|
||||
"path": "sourcegraph.com/sourcegraph/s3cache",
|
||||
"revision": "4150cc6b046500fb69804e34aa4a1ca8be361bcb",
|
||||
"revisionTime": "2014-12-02T19:37:49Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "cBdDkft9ir+lyLbeRQ4zCAoR0AY=",
|
||||
"path": "willnorris.com/go/gifresize",
|
||||
|
|