
vendor: Remove unused vendor packages (fixes #3595) (#5096)

Simon Frei 7 years ago
commit 82c9e23206
100 changed files with 0 additions and 21007 deletions
  1. +0 -25  vendor/github.com/edsrzf/mmap-go/LICENSE
  2. +0 -116  vendor/github.com/edsrzf/mmap-go/mmap.go
  3. +0 -67  vendor/github.com/edsrzf/mmap-go/mmap_unix.go
  4. +0 -125  vendor/github.com/edsrzf/mmap-go/mmap_windows.go
  5. +0 -8  vendor/github.com/edsrzf/mmap-go/msync_netbsd.go
  6. +0 -14  vendor/github.com/edsrzf/mmap-go/msync_unix.go
  7. +0 -23  vendor/github.com/klauspost/reedsolomon/LICENSE
  8. +0 -125  vendor/github.com/klauspost/reedsolomon/examples/simple-decoder.go
  9. +0 -112  vendor/github.com/klauspost/reedsolomon/examples/simple-encoder.go
  10. +0 -165  vendor/github.com/klauspost/reedsolomon/examples/stream-decoder.go
  11. +0 -142  vendor/github.com/klauspost/reedsolomon/examples/stream-encoder.go
  12. +0 -65  vendor/github.com/klauspost/reedsolomon/galois.go
  13. +0 -91  vendor/github.com/klauspost/reedsolomon/galois_amd64.go
  14. +0 -236  vendor/github.com/klauspost/reedsolomon/galois_amd64.s
  15. +0 -48  vendor/github.com/klauspost/reedsolomon/galois_arm64.go
  16. +0 -141  vendor/github.com/klauspost/reedsolomon/galois_arm64.s
  17. +0 -27  vendor/github.com/klauspost/reedsolomon/galois_noasm.go
  18. +0 -132  vendor/github.com/klauspost/reedsolomon/gentables.go
  19. +0 -160  vendor/github.com/klauspost/reedsolomon/inversion_tree.go
  20. +0 -279  vendor/github.com/klauspost/reedsolomon/matrix.go
  21. +0 -111  vendor/github.com/klauspost/reedsolomon/options.go
  22. +0 -884  vendor/github.com/klauspost/reedsolomon/reedsolomon.go
  23. +0 -584  vendor/github.com/klauspost/reedsolomon/streaming.go
  24. +0 -202  vendor/github.com/minio/minio-go/LICENSE
  25. +0 -629  vendor/github.com/minio/minio-go/api-compose-object.go
  26. +0 -84  vendor/github.com/minio/minio-go/api-datatypes.go
  27. +0 -286  vendor/github.com/minio/minio-go/api-error-response.go
  28. +0 -26  vendor/github.com/minio/minio-go/api-get-object-context.go
  29. +0 -136  vendor/github.com/minio/minio-go/api-get-object-file.go
  30. +0 -676  vendor/github.com/minio/minio-go/api-get-object.go
  31. +0 -126  vendor/github.com/minio/minio-go/api-get-options.go
  32. +0 -109  vendor/github.com/minio/minio-go/api-get-policy.go
  33. +0 -717  vendor/github.com/minio/minio-go/api-list.go
  34. +0 -230  vendor/github.com/minio/minio-go/api-notification.go
  35. +0 -213  vendor/github.com/minio/minio-go/api-presigned.go
  36. +0 -255  vendor/github.com/minio/minio-go/api-put-bucket.go
  37. +0 -111  vendor/github.com/minio/minio-go/api-put-object-common.go
  38. +0 -39  vendor/github.com/minio/minio-go/api-put-object-context.go
  39. +0 -23  vendor/github.com/minio/minio-go/api-put-object-copy.go
  40. +0 -44  vendor/github.com/minio/minio-go/api-put-object-encrypted.go
  41. +0 -64  vendor/github.com/minio/minio-go/api-put-object-file-context.go
  42. +0 -27  vendor/github.com/minio/minio-go/api-put-object-file.go
  43. +0 -373  vendor/github.com/minio/minio-go/api-put-object-multipart.go
  44. +0 -417  vendor/github.com/minio/minio-go/api-put-object-streaming.go
  45. +0 -258  vendor/github.com/minio/minio-go/api-put-object.go
  46. +0 -290  vendor/github.com/minio/minio-go/api-remove.go
  47. +0 -245  vendor/github.com/minio/minio-go/api-s3-datatypes.go
  48. +0 -178  vendor/github.com/minio/minio-go/api-stat.go
  49. +0 -832  vendor/github.com/minio/minio-go/api.go
  50. +0 -219  vendor/github.com/minio/minio-go/bucket-cache.go
  51. +0 -232  vendor/github.com/minio/minio-go/bucket-notification.go
  52. +0 -70  vendor/github.com/minio/minio-go/constants.go
  53. +0 -154  vendor/github.com/minio/minio-go/core.go
  54. +0 -227  vendor/github.com/minio/minio-go/docs/validator.go
  55. +0 -61  vendor/github.com/minio/minio-go/examples/minio/listenbucketnotification.go
  56. +0 -52  vendor/github.com/minio/minio-go/examples/s3/bucketexists.go
  57. +0 -78  vendor/github.com/minio/minio-go/examples/s3/composeobject.go
  58. +0 -75  vendor/github.com/minio/minio-go/examples/s3/copyobject.go
  59. +0 -54  vendor/github.com/minio/minio-go/examples/s3/fgetobject-context.go
  60. +0 -46  vendor/github.com/minio/minio-go/examples/s3/fgetobject.go
  61. +0 -80  vendor/github.com/minio/minio-go/examples/s3/fputencrypted-object.go
  62. +0 -53  vendor/github.com/minio/minio-go/examples/s3/fputobject-context.go
  63. +0 -48  vendor/github.com/minio/minio-go/examples/s3/fputobject.go
  64. +0 -89  vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go
  65. +0 -56  vendor/github.com/minio/minio-go/examples/s3/getbucketnotification.go
  66. +0 -56  vendor/github.com/minio/minio-go/examples/s3/getbucketpolicy.go
  67. +0 -73  vendor/github.com/minio/minio-go/examples/s3/getobject-context.go
  68. +0 -64  vendor/github.com/minio/minio-go/examples/s3/getobject.go
  69. +0 -57  vendor/github.com/minio/minio-go/examples/s3/listbucketpolicies.go
  70. +0 -49  vendor/github.com/minio/minio-go/examples/s3/listbuckets.go
  71. +0 -58  vendor/github.com/minio/minio-go/examples/s3/listincompleteuploads.go
  72. +0 -77  vendor/github.com/minio/minio-go/examples/s3/listobjects-N.go
  73. +0 -58  vendor/github.com/minio/minio-go/examples/s3/listobjects.go
  74. +0 -58  vendor/github.com/minio/minio-go/examples/s3/listobjectsV2.go
  75. +0 -47  vendor/github.com/minio/minio-go/examples/s3/makebucket.go
  76. +0 -54  vendor/github.com/minio/minio-go/examples/s3/presignedgetobject.go
  77. +0 -54  vendor/github.com/minio/minio-go/examples/s3/presignedheadobject.go
  78. +0 -60  vendor/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go
  79. +0 -48  vendor/github.com/minio/minio-go/examples/s3/presignedputobject.go
  80. +0 -85  vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go
  81. +0 -68  vendor/github.com/minio/minio-go/examples/s3/putobject-context.go
  82. +0 -87  vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go
  83. +0 -64  vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go
  84. +0 -62  vendor/github.com/minio/minio-go/examples/s3/putobject-s3-accelerate.go
  85. +0 -55  vendor/github.com/minio/minio-go/examples/s3/putobject-streaming.go
  86. +0 -58  vendor/github.com/minio/minio-go/examples/s3/putobject.go
  87. +0 -50  vendor/github.com/minio/minio-go/examples/s3/removeallbucketnotification.go
  88. +0 -49  vendor/github.com/minio/minio-go/examples/s3/removebucket.go
  89. +0 -47  vendor/github.com/minio/minio-go/examples/s3/removeincompleteupload.go
  90. +0 -46  vendor/github.com/minio/minio-go/examples/s3/removeobject.go
  91. +0 -65  vendor/github.com/minio/minio-go/examples/s3/removeobjects.go
  92. +0 -86  vendor/github.com/minio/minio-go/examples/s3/setbucketnotification.go
  93. +0 -55  vendor/github.com/minio/minio-go/examples/s3/setbucketpolicy.go
  94. +0 -46  vendor/github.com/minio/minio-go/examples/s3/statobject.go
  95. +0 -6939  vendor/github.com/minio/minio-go/functional_tests.go
  96. +0 -71  vendor/github.com/minio/minio-go/hook-reader.go
  97. +0 -89  vendor/github.com/minio/minio-go/pkg/credentials/chain.go
  98. +0 -175  vendor/github.com/minio/minio-go/pkg/credentials/credentials.go
  99. +0 -62  vendor/github.com/minio/minio-go/pkg/credentials/doc.go
  100. +0 -71  vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go

+ 0 - 25
vendor/github.com/edsrzf/mmap-go/LICENSE

@@ -1,25 +0,0 @@
-Copyright (c) 2011, Evan Shaw <[email protected]>
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in the
-      documentation and/or other materials provided with the distribution.
-    * Neither the name of the copyright holder nor the
-      names of its contributors may be used to endorse or promote products
-      derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-

+ 0 - 116
vendor/github.com/edsrzf/mmap-go/mmap.go

@@ -1,116 +0,0 @@
-// Copyright 2011 Evan Shaw. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file defines the common package interface and contains a little bit of
-// factored out logic.
-
-// Package mmap allows mapping files into memory. It tries to provide a simple, reasonably portable interface,
-// but doesn't go out of its way to abstract away every little platform detail.
-// This specifically means:
-//	* forked processes may or may not inherit mappings
-//	* a file's timestamp may or may not be updated by writes through mappings
-//	* specifying a size larger than the file's actual size can increase the file's size
-//	* If the mapped file is being modified by another process while your program's running, don't expect consistent results between platforms
-package mmap
-
-import (
-	"errors"
-	"os"
-	"reflect"
-	"unsafe"
-)
-
-const (
-	// RDONLY maps the memory read-only.
-	// Attempts to write to the MMap object will result in undefined behavior.
-	RDONLY = 0
-	// RDWR maps the memory as read-write. Writes to the MMap object will update the
-	// underlying file.
-	RDWR = 1 << iota
-	// COPY maps the memory as copy-on-write. Writes to the MMap object will affect
-	// memory, but the underlying file will remain unchanged.
-	COPY
-	// If EXEC is set, the mapped memory is marked as executable.
-	EXEC
-)
-
-const (
-	// If the ANON flag is set, the mapped memory will not be backed by a file.
-	ANON = 1 << iota
-)
-
-// MMap represents a file mapped into memory.
-type MMap []byte
-
-// Map maps an entire file into memory.
-// If ANON is set in flags, f is ignored.
-func Map(f *os.File, prot, flags int) (MMap, error) {
-	return MapRegion(f, -1, prot, flags, 0)
-}
-
-// MapRegion maps part of a file into memory.
-// The offset parameter must be a multiple of the system's page size.
-// If length < 0, the entire file will be mapped.
-// If ANON is set in flags, f is ignored.
-func MapRegion(f *os.File, length int, prot, flags int, offset int64) (MMap, error) {
-	if offset%int64(os.Getpagesize()) != 0 {
-		return nil, errors.New("offset parameter must be a multiple of the system's page size")
-	}
-
-	var fd uintptr
-	if flags&ANON == 0 {
-		fd = uintptr(f.Fd())
-		if length < 0 {
-			fi, err := f.Stat()
-			if err != nil {
-				return nil, err
-			}
-			length = int(fi.Size())
-		}
-	} else {
-		if length <= 0 {
-			return nil, errors.New("anonymous mapping requires non-zero length")
-		}
-		fd = ^uintptr(0)
-	}
-	return mmap(length, uintptr(prot), uintptr(flags), fd, offset)
-}
-
-func (m *MMap) header() *reflect.SliceHeader {
-	return (*reflect.SliceHeader)(unsafe.Pointer(m))
-}
-
-// Lock keeps the mapped region in physical memory, ensuring that it will not be
-// swapped out.
-func (m MMap) Lock() error {
-	dh := m.header()
-	return lock(dh.Data, uintptr(dh.Len))
-}
-
-// Unlock reverses the effect of Lock, allowing the mapped region to potentially
-// be swapped out.
-// If m is already unlocked, an error will result.
-func (m MMap) Unlock() error {
-	dh := m.header()
-	return unlock(dh.Data, uintptr(dh.Len))
-}
-
-// Flush synchronizes the mapping's contents to the file's contents on disk.
-func (m MMap) Flush() error {
-	dh := m.header()
-	return flush(dh.Data, uintptr(dh.Len))
-}
-
-// Unmap deletes the memory mapped region, flushes any remaining changes, and sets
-// m to nil.
-// Trying to read or write any remaining references to m after Unmap is called will
-// result in undefined behavior.
-// Unmap should only be called on the slice value that was originally returned from
-// a call to Map. Calling Unmap on a derived slice may cause errors.
-func (m *MMap) Unmap() error {
-	dh := m.header()
-	err := unmap(dh.Data, uintptr(dh.Len))
-	*m = nil
-	return err
-}
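
For reference, a minimal usage sketch of the mmap-go API deleted above (a hedged example, not part of the commit; it assumes a writable file data.bin exists):

    package main

    import (
        "fmt"
        "os"

        "github.com/edsrzf/mmap-go"
    )

    func main() {
        f, err := os.OpenFile("data.bin", os.O_RDWR, 0644)
        if err != nil {
            panic(err)
        }
        defer f.Close()

        // Map the whole file read-write; writes go through to the file.
        m, err := mmap.Map(f, mmap.RDWR, 0)
        if err != nil {
            panic(err)
        }
        m[0] = 'X'
        if err := m.Flush(); err != nil { // sync dirty pages to disk
            panic(err)
        }
        fmt.Println("mapped", len(m), "bytes")
        if err := m.Unmap(); err != nil { // flush remaining changes; m becomes nil
            panic(err)
        }
    }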

+ 0 - 67
vendor/github.com/edsrzf/mmap-go/mmap_unix.go

@@ -1,67 +0,0 @@
-// Copyright 2011 Evan Shaw. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux openbsd solaris netbsd
-
-package mmap
-
-import (
-	"syscall"
-)
-
-func mmap(len int, inprot, inflags, fd uintptr, off int64) ([]byte, error) {
-	flags := syscall.MAP_SHARED
-	prot := syscall.PROT_READ
-	switch {
-	case inprot&COPY != 0:
-		prot |= syscall.PROT_WRITE
-		flags = syscall.MAP_PRIVATE
-	case inprot&RDWR != 0:
-		prot |= syscall.PROT_WRITE
-	}
-	if inprot&EXEC != 0 {
-		prot |= syscall.PROT_EXEC
-	}
-	if inflags&ANON != 0 {
-		flags |= syscall.MAP_ANON
-	}
-
-	b, err := syscall.Mmap(int(fd), off, len, prot, flags)
-	if err != nil {
-		return nil, err
-	}
-	return b, nil
-}
-
-func flush(addr, len uintptr) error {
-	_, _, errno := syscall.Syscall(_SYS_MSYNC, addr, len, _MS_SYNC)
-	if errno != 0 {
-		return syscall.Errno(errno)
-	}
-	return nil
-}
-
-func lock(addr, len uintptr) error {
-	_, _, errno := syscall.Syscall(syscall.SYS_MLOCK, addr, len, 0)
-	if errno != 0 {
-		return syscall.Errno(errno)
-	}
-	return nil
-}
-
-func unlock(addr, len uintptr) error {
-	_, _, errno := syscall.Syscall(syscall.SYS_MUNLOCK, addr, len, 0)
-	if errno != 0 {
-		return syscall.Errno(errno)
-	}
-	return nil
-}
-
-func unmap(addr, len uintptr) error {
-	_, _, errno := syscall.Syscall(syscall.SYS_MUNMAP, addr, len, 0)
-	if errno != 0 {
-		return syscall.Errno(errno)
-	}
-	return nil
-}

+ 0 - 125
vendor/github.com/edsrzf/mmap-go/mmap_windows.go

@@ -1,125 +0,0 @@
-// Copyright 2011 Evan Shaw. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mmap
-
-import (
-	"errors"
-	"os"
-	"sync"
-	"syscall"
-)
-
-// mmap on Windows is a two-step process.
-// First, we call CreateFileMapping to get a handle.
-// Then, we call MapViewOfFile to get an actual pointer into memory.
-// Because we want to emulate a POSIX-style mmap, we don't want to expose
-// the handle -- only the pointer. We also want to return only a byte slice,
-// not a struct, so it's convenient to manipulate.
-
-// We keep this map so that we can get back the original handle from the memory address.
-var handleLock sync.Mutex
-var handleMap = map[uintptr]syscall.Handle{}
-
-func mmap(len int, prot, flags, hfile uintptr, off int64) ([]byte, error) {
-	flProtect := uint32(syscall.PAGE_READONLY)
-	dwDesiredAccess := uint32(syscall.FILE_MAP_READ)
-	switch {
-	case prot&COPY != 0:
-		flProtect = syscall.PAGE_WRITECOPY
-		dwDesiredAccess = syscall.FILE_MAP_COPY
-	case prot&RDWR != 0:
-		flProtect = syscall.PAGE_READWRITE
-		dwDesiredAccess = syscall.FILE_MAP_WRITE
-	}
-	if prot&EXEC != 0 {
-		flProtect <<= 4
-		dwDesiredAccess |= syscall.FILE_MAP_EXECUTE
-	}
-
-	// The maximum size is the area of the file, starting from 0,
-	// that we wish to allow to be mappable. It is the sum of
-	// the length the user requested, plus the offset where that length
-	// is starting from. This does not map the data into memory.
-	maxSizeHigh := uint32((off + int64(len)) >> 32)
-	maxSizeLow := uint32((off + int64(len)) & 0xFFFFFFFF)
-	// TODO: Do we need to set some security attributes? It might help portability.
-	h, errno := syscall.CreateFileMapping(syscall.Handle(hfile), nil, flProtect, maxSizeHigh, maxSizeLow, nil)
-	if h == 0 {
-		return nil, os.NewSyscallError("CreateFileMapping", errno)
-	}
-
-	// Actually map a view of the data into memory. The view's size
-	// is the length the user requested.
-	fileOffsetHigh := uint32(off >> 32)
-	fileOffsetLow := uint32(off & 0xFFFFFFFF)
-	addr, errno := syscall.MapViewOfFile(h, dwDesiredAccess, fileOffsetHigh, fileOffsetLow, uintptr(len))
-	if addr == 0 {
-		return nil, os.NewSyscallError("MapViewOfFile", errno)
-	}
-	handleLock.Lock()
-	handleMap[addr] = h
-	handleLock.Unlock()
-
-	m := MMap{}
-	dh := m.header()
-	dh.Data = addr
-	dh.Len = len
-	dh.Cap = dh.Len
-
-	return m, nil
-}
-
-func flush(addr, len uintptr) error {
-	errno := syscall.FlushViewOfFile(addr, len)
-	if errno != nil {
-		return os.NewSyscallError("FlushViewOfFile", errno)
-	}
-
-	handleLock.Lock()
-	defer handleLock.Unlock()
-	handle, ok := handleMap[addr]
-	if !ok {
-		// should be impossible; we would've errored above
-		return errors.New("unknown base address")
-	}
-
-	errno = syscall.FlushFileBuffers(handle)
-	return os.NewSyscallError("FlushFileBuffers", errno)
-}
-
-func lock(addr, len uintptr) error {
-	errno := syscall.VirtualLock(addr, len)
-	return os.NewSyscallError("VirtualLock", errno)
-}
-
-func unlock(addr, len uintptr) error {
-	errno := syscall.VirtualUnlock(addr, len)
-	return os.NewSyscallError("VirtualUnlock", errno)
-}
-
-func unmap(addr, len uintptr) error {
-	flush(addr, len)
-	// Lock the UnmapViewOfFile along with the handleMap deletion.
-	// As soon as we unmap the view, the OS is free to give the
-	// same addr to another new map. We don't want another goroutine
-	// to insert and remove the same addr into handleMap while
-	// we're trying to remove our old addr/handle pair.
-	handleLock.Lock()
-	defer handleLock.Unlock()
-	err := syscall.UnmapViewOfFile(addr)
-	if err != nil {
-		return err
-	}
-
-	handle, ok := handleMap[addr]
-	if !ok {
-		// should be impossible; we would've errored above
-		return errors.New("unknown base address")
-	}
-	delete(handleMap, addr)
-
-	e := syscall.CloseHandle(syscall.Handle(handle))
-	return os.NewSyscallError("CloseHandle", e)
-}

+ 0 - 8
vendor/github.com/edsrzf/mmap-go/msync_netbsd.go

@@ -1,8 +0,0 @@
-// Copyright 2011 Evan Shaw. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package mmap
-
-const _SYS_MSYNC = 277
-const _MS_SYNC = 0x04

+ 0 - 14
vendor/github.com/edsrzf/mmap-go/msync_unix.go

@@ -1,14 +0,0 @@
-// Copyright 2011 Evan Shaw. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux openbsd solaris
-
-package mmap
-
-import (
-	"syscall"
-)
-
-const _SYS_MSYNC = syscall.SYS_MSYNC
-const _MS_SYNC = syscall.MS_SYNC

+ 0 - 23
vendor/github.com/klauspost/reedsolomon/LICENSE

@@ -1,23 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Klaus Post
-Copyright (c) 2015 Backblaze
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-

+ 0 - 125
vendor/github.com/klauspost/reedsolomon/examples/simple-decoder.go

@@ -1,125 +0,0 @@
-//+build ignore
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
-//
-// Simple decoder example.
-//
-// The decoder reverses the process of "simple-encoder.go"
-//
-// To build an executable use:
-//
-// go build simple-decoder.go
-//
-// Simple Encoder/Decoder Shortcomings:
-// * If the file size of the input isn't divisible by the number of data shards
-//   the output will contain extra zeroes
-//
-// * If the shard numbers aren't the same for the decoder as in the
-//   encoder, invalid output will be generated.
-//
-// * If values have changed in a shard, it cannot be reconstructed.
-//
-// * If two shards have been swapped, reconstruction will always fail.
-//   You need to supply the shards in the same order as they were given to you.
-//
-// The solution for this is to save a metadata file containing:
-//
-// * File size.
-// * The number of data/parity shards.
-// * HASH of each shard.
-// * Order of the shards.
-//
-// If you save these properties, you should be able to detect file corruption
-// in a shard and be able to reconstruct your data if you have the needed number of shards left.
-
-package main
-
-import (
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"os"
-
-	"github.com/klauspost/reedsolomon"
-)
-
-var dataShards = flag.Int("data", 4, "Number of shards to split the data into")
-var parShards = flag.Int("par", 2, "Number of parity shards")
-var outFile = flag.String("out", "", "Alternative output path/file")
-
-func init() {
-	flag.Usage = func() {
-		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
-		fmt.Fprintf(os.Stderr, "  simple-decoder [-flags] basefile.ext\nDo not add the number to the filename.\n")
-		fmt.Fprintf(os.Stderr, "Valid flags:\n")
-		flag.PrintDefaults()
-	}
-}
-
-func main() {
-	// Parse flags
-	flag.Parse()
-	args := flag.Args()
-	if len(args) != 1 {
-		fmt.Fprintf(os.Stderr, "Error: No filenames given\n")
-		flag.Usage()
-		os.Exit(1)
-	}
-	fname := args[0]
-
-	// Create matrix
-	enc, err := reedsolomon.New(*dataShards, *parShards)
-	checkErr(err)
-
-	// Create shards and load the data.
-	shards := make([][]byte, *dataShards+*parShards)
-	for i := range shards {
-		infn := fmt.Sprintf("%s.%d", fname, i)
-		fmt.Println("Opening", infn)
-		shards[i], err = ioutil.ReadFile(infn)
-		if err != nil {
-			fmt.Println("Error reading file", err)
-			shards[i] = nil
-		}
-	}
-
-	// Verify the shards
-	ok, err := enc.Verify(shards)
-	if ok {
-		fmt.Println("No reconstruction needed")
-	} else {
-		fmt.Println("Verification failed. Reconstructing data")
-		err = enc.Reconstruct(shards)
-		if err != nil {
-			fmt.Println("Reconstruct failed -", err)
-			os.Exit(1)
-		}
-		ok, err = enc.Verify(shards)
-		if !ok {
-			fmt.Println("Verification failed after reconstruction, data likely corrupted.")
-			os.Exit(1)
-		}
-		checkErr(err)
-	}
-
-	// Join the shards and write them
-	outfn := *outFile
-	if outfn == "" {
-		outfn = fname
-	}
-
-	fmt.Println("Writing data to", outfn)
-	f, err := os.Create(outfn)
-	checkErr(err)
-
-	// We don't know the exact filesize.
-	err = enc.Join(f, shards, len(shards[0])**dataShards)
-	checkErr(err)
-}
-
-func checkErr(err error) {
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Error: %s", err.Error())
-		os.Exit(2)
-	}
-}
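
The shortcomings list above ends by recommending a metadata file holding the file size, the shard counts, a hash per shard, and the shard order. A minimal sketch of that idea (ShardMeta and writeMeta are hypothetical names, shown only to make the recommendation concrete):

    package main

    import (
        "crypto/sha256"
        "encoding/json"
        "io/ioutil"
        "log"
    )

    // ShardMeta is a hypothetical sidecar record holding exactly the
    // properties the example comments say to save: file size, the
    // data/parity shard counts, and an ordered hash per shard.
    type ShardMeta struct {
        FileSize     int64    `json:"fileSize"`
        DataShards   int      `json:"dataShards"`
        ParityShards int      `json:"parityShards"`
        ShardHashes  [][]byte `json:"shardHashes"` // SHA-256, in shard order
    }

    func writeMeta(path string, size int64, data, parity int, shards [][]byte) error {
        meta := ShardMeta{FileSize: size, DataShards: data, ParityShards: parity}
        for _, s := range shards {
            h := sha256.Sum256(s) // append order preserves shard order
            meta.ShardHashes = append(meta.ShardHashes, h[:])
        }
        b, err := json.MarshalIndent(&meta, "", "  ")
        if err != nil {
            return err
        }
        return ioutil.WriteFile(path, b, 0644)
    }

    func main() {
        shards := [][]byte{[]byte("shard0"), []byte("shard1")}
        if err := writeMeta("file.ext.meta", 12, 1, 1, shards); err != nil {
            log.Fatal(err)
        }
    }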

+ 0 - 112
vendor/github.com/klauspost/reedsolomon/examples/simple-encoder.go

@@ -1,112 +0,0 @@
-//+build ignore
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
-//
-// Simple encoder example
-//
-// The encoder encodes a single file into a number of shards
-// To reverse the process see "simple-decoder.go"
-//
-// To build an executable use:
-//
-// go build simple-encoder.go
-//
-// Simple Encoder/Decoder Shortcomings:
-// * If the file size of the input isn't divisible by the number of data shards
-//   the output will contain extra zeroes
-//
-// * If the shard numbers aren't the same for the decoder as in the
-//   encoder, invalid output will be generated.
-//
-// * If values have changed in a shard, it cannot be reconstructed.
-//
-// * If two shards have been swapped, reconstruction will always fail.
-//   You need to supply the shards in the same order as they were given to you.
-//
-// The solution for this is to save a metadata file containing:
-//
-// * File size.
-// * The number of data/parity shards.
-// * HASH of each shard.
-// * Order of the shards.
-//
-// If you save these properties, you should be able to detect file corruption
-// in a shard and be able to reconstruct your data if you have the needed number of shards left.
-
-package main
-
-import (
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-
-	"github.com/klauspost/reedsolomon"
-)
-
-var dataShards = flag.Int("data", 4, "Number of shards to split the data into, must be below 257.")
-var parShards = flag.Int("par", 2, "Number of parity shards")
-var outDir = flag.String("out", "", "Alternative output directory")
-
-func init() {
-	flag.Usage = func() {
-		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
-		fmt.Fprintf(os.Stderr, "  simple-encoder [-flags] filename.ext\n\n")
-		fmt.Fprintf(os.Stderr, "Valid flags:\n")
-		flag.PrintDefaults()
-	}
-}
-
-func main() {
-	// Parse command line parameters.
-	flag.Parse()
-	args := flag.Args()
-	if len(args) != 1 {
-		fmt.Fprintf(os.Stderr, "Error: No input filename given\n")
-		flag.Usage()
-		os.Exit(1)
-	}
-	if *dataShards > 257 {
-		fmt.Fprintf(os.Stderr, "Error: Too many data shards\n")
-		os.Exit(1)
-	}
-	fname := args[0]
-
-	// Create encoding matrix.
-	enc, err := reedsolomon.New(*dataShards, *parShards)
-	checkErr(err)
-
-	fmt.Println("Opening", fname)
-	b, err := ioutil.ReadFile(fname)
-	checkErr(err)
-
-	// Split the file into equally sized shards.
-	shards, err := enc.Split(b)
-	checkErr(err)
-	fmt.Printf("File split into %d data+parity shards with %d bytes/shard.\n", len(shards), len(shards[0]))
-
-	// Encode parity
-	err = enc.Encode(shards)
-	checkErr(err)
-
-	// Write out the resulting files.
-	dir, file := filepath.Split(fname)
-	if *outDir != "" {
-		dir = *outDir
-	}
-	for i, shard := range shards {
-		outfn := fmt.Sprintf("%s.%d", file, i)
-
-		fmt.Println("Writing to", outfn)
-		err = ioutil.WriteFile(filepath.Join(dir, outfn), shard, os.ModePerm)
-		checkErr(err)
-	}
-}
-
-func checkErr(err error) {
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Error: %s", err.Error())
-		os.Exit(2)
-	}
-}

+ 0 - 165
vendor/github.com/klauspost/reedsolomon/examples/stream-decoder.go

@@ -1,165 +0,0 @@
-//+build ignore
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
-//
-// Stream decoder example.
-//
-// The decoder reverses the process of "stream-encoder.go"
-//
-// To build an executable use:
-//
-// go build stream-decoder.go
-//
-// Simple Encoder/Decoder Shortcomings:
-// * If the file size of the input isn't divisible by the number of data shards
-//   the output will contain extra zeroes
-//
-// * If the shard numbers aren't the same for the decoder as in the
-//   encoder, invalid output will be generated.
-//
-// * If values have changed in a shard, it cannot be reconstructed.
-//
-// * If two shards have been swapped, reconstruction will always fail.
-//   You need to supply the shards in the same order as they were given to you.
-//
-// The solution for this is to save a metadata file containing:
-//
-// * File size.
-// * The number of data/parity shards.
-// * HASH of each shard.
-// * Order of the shards.
-//
-// If you save these properties, you should abe able to detect file corruption
-// in a shard and be able to reconstruct your data if you have the needed number of shards left.
-
-package main
-
-import (
-	"flag"
-	"fmt"
-	"io"
-	"os"
-
-	"github.com/klauspost/reedsolomon"
-)
-
-var dataShards = flag.Int("data", 4, "Number of shards to split the data into")
-var parShards = flag.Int("par", 2, "Number of parity shards")
-var outFile = flag.String("out", "", "Alternative output path/file")
-
-func init() {
-	flag.Usage = func() {
-		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
-		fmt.Fprintf(os.Stderr, "  %s [-flags] basefile.ext\nDo not add the number to the filename.\n", os.Args[0])
-		fmt.Fprintf(os.Stderr, "Valid flags:\n")
-		flag.PrintDefaults()
-	}
-}
-
-func main() {
-	// Parse flags
-	flag.Parse()
-	args := flag.Args()
-	if len(args) != 1 {
-		fmt.Fprintf(os.Stderr, "Error: No filenames given\n")
-		flag.Usage()
-		os.Exit(1)
-	}
-	fname := args[0]
-
-	// Create matrix
-	enc, err := reedsolomon.NewStream(*dataShards, *parShards)
-	checkErr(err)
-
-	// Open the inputs
-	shards, size, err := openInput(*dataShards, *parShards, fname)
-	checkErr(err)
-
-	// Verify the shards
-	ok, err := enc.Verify(shards)
-	if ok {
-		fmt.Println("No reconstruction needed")
-	} else {
-		fmt.Println("Verification failed. Reconstructing data")
-		shards, size, err = openInput(*dataShards, *parShards, fname)
-		checkErr(err)
-		// Create out destination writers
-		out := make([]io.Writer, len(shards))
-		for i := range out {
-			if shards[i] == nil {
-				outfn := fmt.Sprintf("%s.%d", fname, i)
-				fmt.Println("Creating", outfn)
-				out[i], err = os.Create(outfn)
-				checkErr(err)
-			}
-		}
-		err = enc.Reconstruct(shards, out)
-		if err != nil {
-			fmt.Println("Reconstruct failed -", err)
-			os.Exit(1)
-		}
-		// Close output.
-		for i := range out {
-			if out[i] != nil {
-				err := out[i].(*os.File).Close()
-				checkErr(err)
-			}
-		}
-		shards, size, err = openInput(*dataShards, *parShards, fname)
-		ok, err = enc.Verify(shards)
-		if !ok {
-			fmt.Println("Verification failed after reconstruction, data likely corrupted:", err)
-			os.Exit(1)
-		}
-		checkErr(err)
-	}
-
-	// Join the shards and write them
-	outfn := *outFile
-	if outfn == "" {
-		outfn = fname
-	}
-
-	fmt.Println("Writing data to", outfn)
-	f, err := os.Create(outfn)
-	checkErr(err)
-
-	shards, size, err = openInput(*dataShards, *parShards, fname)
-	checkErr(err)
-
-	// We don't know the exact filesize.
-	err = enc.Join(f, shards, int64(*dataShards)*size)
-	checkErr(err)
-}
-
-func openInput(dataShards, parShards int, fname string) (r []io.Reader, size int64, err error) {
-	// Create shards and load the data.
-	shards := make([]io.Reader, dataShards+parShards)
-	for i := range shards {
-		infn := fmt.Sprintf("%s.%d", fname, i)
-		fmt.Println("Opening", infn)
-		f, err := os.Open(infn)
-		if err != nil {
-			fmt.Println("Error reading file", err)
-			shards[i] = nil
-			continue
-		} else {
-			shards[i] = f
-		}
-		stat, err := f.Stat()
-		checkErr(err)
-		if stat.Size() > 0 {
-			size = stat.Size()
-		} else {
-			shards[i] = nil
-		}
-	}
-	return shards, size, nil
-}
-
-func checkErr(err error) {
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Error: %s", err.Error())
-		os.Exit(2)
-	}
-}

+ 0 - 142
vendor/github.com/klauspost/reedsolomon/examples/stream-encoder.go

@@ -1,142 +0,0 @@
-//+build ignore
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
-//
-// Simple stream encoder example
-//
-// The encoder encodes a single file into a number of shards
-// To reverse the process see "stream-decoder.go"
-//
-// To build an executable use:
-//
-// go build stream-encoder.go
-//
-// Simple Encoder/Decoder Shortcomings:
-// * If the file size of the input isn't divisible by the number of data shards
-//   the output will contain extra zeroes
-//
-// * If the shard numbers aren't the same for the decoder as in the
-//   encoder, invalid output will be generated.
-//
-// * If values have changed in a shard, it cannot be reconstructed.
-//
-// * If two shards have been swapped, reconstruction will always fail.
-//   You need to supply the shards in the same order as they were given to you.
-//
-// The solution for this is to save a metadata file containing:
-//
-// * File size.
-// * The number of data/parity shards.
-// * HASH of each shard.
-// * Order of the shards.
-//
-// If you save these properties, you should be able to detect file corruption
-// in a shard and be able to reconstruct your data if you have the needed number of shards left.
-
-package main
-
-import (
-	"flag"
-	"fmt"
-	"os"
-	"path/filepath"
-
-	"io"
-
-	"github.com/klauspost/reedsolomon"
-)
-
-var dataShards = flag.Int("data", 4, "Number of shards to split the data into, must be below 257.")
-var parShards = flag.Int("par", 2, "Number of parity shards")
-var outDir = flag.String("out", "", "Alternative output directory")
-
-func init() {
-	flag.Usage = func() {
-		fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
-		fmt.Fprintf(os.Stderr, "  %s [-flags] filename.ext\n\n", os.Args[0])
-		fmt.Fprintf(os.Stderr, "Valid flags:\n")
-		flag.PrintDefaults()
-	}
-}
-
-func main() {
-	// Parse command line parameters.
-	flag.Parse()
-	args := flag.Args()
-	if len(args) != 1 {
-		fmt.Fprintf(os.Stderr, "Error: No input filename given\n")
-		flag.Usage()
-		os.Exit(1)
-	}
-	if *dataShards > 257 {
-		fmt.Fprintf(os.Stderr, "Error: Too many data shards\n")
-		os.Exit(1)
-	}
-	fname := args[0]
-
-	// Create encoding matrix.
-	enc, err := reedsolomon.NewStream(*dataShards, *parShards)
-	checkErr(err)
-
-	fmt.Println("Opening", fname)
-	f, err := os.Open(fname)
-	checkErr(err)
-
-	instat, err := f.Stat()
-	checkErr(err)
-
-	shards := *dataShards + *parShards
-	out := make([]*os.File, shards)
-
-	// Create the resulting files.
-	dir, file := filepath.Split(fname)
-	if *outDir != "" {
-		dir = *outDir
-	}
-	for i := range out {
-		outfn := fmt.Sprintf("%s.%d", file, i)
-		fmt.Println("Creating", outfn)
-		out[i], err = os.Create(filepath.Join(dir, outfn))
-		checkErr(err)
-	}
-
-	// Split into files.
-	data := make([]io.Writer, *dataShards)
-	for i := range data {
-		data[i] = out[i]
-	}
-	// Do the split
-	err = enc.Split(f, data, instat.Size())
-	checkErr(err)
-
-	// Close and re-open the files.
-	input := make([]io.Reader, *dataShards)
-
-	for i := range data {
-		out[i].Close()
-		f, err := os.Open(out[i].Name())
-		checkErr(err)
-		input[i] = f
-		defer f.Close()
-	}
-
-	// Create parity output writers
-	parity := make([]io.Writer, *parShards)
-	for i := range parity {
-		parity[i] = out[*dataShards+i]
-		defer out[*dataShards+i].Close()
-	}
-
-	// Encode parity
-	err = enc.Encode(input, parity)
-	checkErr(err)
-	fmt.Printf("File split into %d data + %d parity shards.\n", *dataShards, *parShards)
-
-}
-
-func checkErr(err error) {
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Error: %s", err.Error())
-		os.Exit(2)
-	}
-}

File diff suppressed because it is too large
+ 0 - 65
vendor/github.com/klauspost/reedsolomon/galois.go


+ 0 - 91
vendor/github.com/klauspost/reedsolomon/galois_amd64.go

@@ -1,91 +0,0 @@
-//+build !noasm
-//+build !appengine
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
-
-package reedsolomon
-
-//go:noescape
-func galMulSSSE3(low, high, in, out []byte)
-
-//go:noescape
-func galMulSSSE3Xor(low, high, in, out []byte)
-
-//go:noescape
-func galMulAVX2Xor(low, high, in, out []byte)
-
-//go:noescape
-func galMulAVX2(low, high, in, out []byte)
-
-//go:noescape
-func sSE2XorSlice(in, out []byte)
-
-// This is what the assembler routines do in blocks of 16 bytes:
-/*
-func galMulSSSE3(low, high, in, out []byte) {
-	for n, input := range in {
-		l := input & 0xf
-		h := input >> 4
-		out[n] = low[l] ^ high[h]
-	}
-}
-
-func galMulSSSE3Xor(low, high, in, out []byte) {
-	for n, input := range in {
-		l := input & 0xf
-		h := input >> 4
-		out[n] ^= low[l] ^ high[h]
-	}
-}
-*/
-
-func galMulSlice(c byte, in, out []byte, ssse3, avx2 bool) {
-	var done int
-	if avx2 {
-		galMulAVX2(mulTableLow[c][:], mulTableHigh[c][:], in, out)
-		done = (len(in) >> 5) << 5
-	} else if ssse3 {
-		galMulSSSE3(mulTableLow[c][:], mulTableHigh[c][:], in, out)
-		done = (len(in) >> 4) << 4
-	}
-	remain := len(in) - done
-	if remain > 0 {
-		mt := mulTable[c]
-		for i := done; i < len(in); i++ {
-			out[i] = mt[in[i]]
-		}
-	}
-}
-
-func galMulSliceXor(c byte, in, out []byte, ssse3, avx2 bool) {
-	var done int
-	if avx2 {
-		galMulAVX2Xor(mulTableLow[c][:], mulTableHigh[c][:], in, out)
-		done = (len(in) >> 5) << 5
-	} else if ssse3 {
-		galMulSSSE3Xor(mulTableLow[c][:], mulTableHigh[c][:], in, out)
-		done = (len(in) >> 4) << 4
-	}
-	remain := len(in) - done
-	if remain > 0 {
-		mt := mulTable[c]
-		for i := done; i < len(in); i++ {
-			out[i] ^= mt[in[i]]
-		}
-	}
-}
-
-// slice galois add
-func sliceXor(in, out []byte, sse2 bool) {
-	var done int
-	if sse2 {
-		sSE2XorSlice(in, out)
-		done = (len(in) >> 4) << 4
-	}
-	remain := len(in) - done
-	if remain > 0 {
-		for i := done; i < len(in); i++ {
-			out[i] ^= in[i]
-		}
-	}
-}

+ 0 - 236
vendor/github.com/klauspost/reedsolomon/galois_amd64.s

@@ -1,236 +0,0 @@
-//+build !noasm !appengine
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
-
-// Based on http://www.snia.org/sites/default/files2/SDC2013/presentations/NewThinking/EthanMiller_Screaming_Fast_Galois_Field%20Arithmetic_SIMD%20Instructions.pdf
-// and http://jerasure.org/jerasure/gf-complete/tree/master
-
-// func galMulSSSE3Xor(low, high, in, out []byte)
-TEXT ·galMulSSSE3Xor(SB), 7, $0
-	MOVQ   low+0(FP), SI     // SI: &low
-	MOVQ   high+24(FP), DX   // DX: &high
-	MOVOU  (SI), X6          // X6 low
-	MOVOU  (DX), X7          // X7: high
-	MOVQ   $15, BX           // BX: low mask
-	MOVQ   BX, X8
-	PXOR   X5, X5
-	MOVQ   in+48(FP), SI     // SI: &in
-	MOVQ   in_len+56(FP), R9 // R9: len(in)
-	MOVQ   out+72(FP), DX    // DX: &out
-	PSHUFB X5, X8            // X8: lomask (unpacked)
-	SHRQ   $4, R9            // len(in) / 16
-	MOVQ   SI, AX
-	MOVQ   DX, BX
-	ANDQ   $15, AX
-	ANDQ   $15, BX
-	CMPQ   R9, $0
-	JEQ    done_xor
-	ORQ    AX, BX
-	CMPQ   BX, $0
-	JNZ    loopback_xor
-
-loopback_xor_aligned:
-	MOVOA  (SI), X0             // in[x]
-	MOVOA  (DX), X4             // out[x]
-	MOVOA  X0, X1               // in[x]
-	MOVOA  X6, X2               // low copy
-	MOVOA  X7, X3               // high copy
-	PSRLQ  $4, X1               // X1: high input
-	PAND   X8, X0               // X0: low input
-	PAND   X8, X1               // X1: high input
-	PSHUFB X0, X2               // X2: mul low part
-	PSHUFB X1, X3               // X3: mul high part
-	PXOR   X2, X3               // X3: Result
-	PXOR   X4, X3               // X3: Result xor existing out
-	MOVOA  X3, (DX)             // Store
-	ADDQ   $16, SI              // in+=16
-	ADDQ   $16, DX              // out+=16
-	SUBQ   $1, R9
-	JNZ    loopback_xor_aligned
-	JMP    done_xor
-
-loopback_xor:
-	MOVOU  (SI), X0     // in[x]
-	MOVOU  (DX), X4     // out[x]
-	MOVOU  X0, X1       // in[x]
-	MOVOU  X6, X2       // low copy
-	MOVOU  X7, X3       // high copy
-	PSRLQ  $4, X1       // X1: high input
-	PAND   X8, X0       // X0: low input
-	PAND   X8, X1       // X1: high input
-	PSHUFB X0, X2       // X2: mul low part
-	PSHUFB X1, X3       // X3: mul high part
-	PXOR   X2, X3       // X3: Result
-	PXOR   X4, X3       // X3: Result xor existing out
-	MOVOU  X3, (DX)     // Store
-	ADDQ   $16, SI      // in+=16
-	ADDQ   $16, DX      // out+=16
-	SUBQ   $1, R9
-	JNZ    loopback_xor
-
-done_xor:
-	RET
-
-// func galMulSSSE3(low, high, in, out []byte)
-TEXT ·galMulSSSE3(SB), 7, $0
-	MOVQ   low+0(FP), SI     // SI: &low
-	MOVQ   high+24(FP), DX   // DX: &high
-	MOVOU  (SI), X6          // X6 low
-	MOVOU  (DX), X7          // X7: high
-	MOVQ   $15, BX           // BX: low mask
-	MOVQ   BX, X8
-	PXOR   X5, X5
-	MOVQ   in+48(FP), SI     // SI: &in
-	MOVQ   in_len+56(FP), R9 // R9: len(in)
-	MOVQ   out+72(FP), DX    // DX: &out
-	PSHUFB X5, X8            // X8: lomask (unpacked)
-	MOVQ   SI, AX
-	MOVQ   DX, BX
-	SHRQ   $4, R9            // len(in) / 16
-	ANDQ   $15, AX
-	ANDQ   $15, BX
-	CMPQ   R9, $0
-	JEQ    done
-	ORQ    AX, BX
-	CMPQ   BX, $0
-	JNZ    loopback
-
-loopback_aligned:
-	MOVOA  (SI), X0         // in[x]
-	MOVOA  X0, X1           // in[x]
-	MOVOA  X6, X2           // low copy
-	MOVOA  X7, X3           // high copy
-	PSRLQ  $4, X1           // X1: high input
-	PAND   X8, X0           // X0: low input
-	PAND   X8, X1           // X1: high input
-	PSHUFB X0, X2           // X2: mul low part
-	PSHUFB X1, X3           // X3: mul high part
-	PXOR   X2, X3           // X3: Result
-	MOVOA  X3, (DX)         // Store
-	ADDQ   $16, SI          // in+=16
-	ADDQ   $16, DX          // out+=16
-	SUBQ   $1, R9
-	JNZ    loopback_aligned
-	JMP    done
-
-loopback:
-	MOVOU  (SI), X0 // in[x]
-	MOVOU  X0, X1   // in[x]
-	MOVOA  X6, X2   // low copy
-	MOVOA  X7, X3   // high copy
-	PSRLQ  $4, X1   // X1: high input
-	PAND   X8, X0   // X0: low input
-	PAND   X8, X1   // X1: high input
-	PSHUFB X0, X2   // X2: mul low part
-	PSHUFB X1, X3   // X3: mul high part
-	PXOR   X2, X3   // X3: Result
-	MOVOU  X3, (DX) // Store
-	ADDQ   $16, SI  // in+=16
-	ADDQ   $16, DX  // out+=16
-	SUBQ   $1, R9
-	JNZ    loopback
-
-done:
-	RET
-
-// func galMulAVX2Xor(low, high, in, out []byte)
-TEXT ·galMulAVX2Xor(SB), 7, $0
-	MOVQ  low+0(FP), SI     // SI: &low
-	MOVQ  high+24(FP), DX   // DX: &high
-	MOVQ  $15, BX           // BX: low mask
-	MOVQ  BX, X5
-	MOVOU (SI), X6          // X6: low
-	MOVOU (DX), X7          // X7: high
-	MOVQ  in_len+56(FP), R9 // R9: len(in)
-
-	VINSERTI128  $1, X6, Y6, Y6 // low
-	VINSERTI128  $1, X7, Y7, Y7 // high
-	VPBROADCASTB X5, Y8         // Y8: lomask (unpacked)
-
-	SHRQ  $5, R9         // len(in) / 32
-	MOVQ  out+72(FP), DX // DX: &out
-	MOVQ  in+48(FP), SI  // SI: &in
-	TESTQ R9, R9
-	JZ    done_xor_avx2
-
-loopback_xor_avx2:
-	VMOVDQU (SI), Y0
-	VMOVDQU (DX), Y4
-	VPSRLQ  $4, Y0, Y1 // Y1: high input
-	VPAND   Y8, Y0, Y0 // Y0: low input
-	VPAND   Y8, Y1, Y1 // Y1: high input
-	VPSHUFB Y0, Y6, Y2 // Y2: mul low part
-	VPSHUFB Y1, Y7, Y3 // Y3: mul high part
-	VPXOR   Y3, Y2, Y3 // Y3: Result
-	VPXOR   Y4, Y3, Y4 // Y4: Result
-	VMOVDQU Y4, (DX)
-
-	ADDQ $32, SI           // in+=32
-	ADDQ $32, DX           // out+=32
-	SUBQ $1, R9
-	JNZ  loopback_xor_avx2
-
-done_xor_avx2:
-	VZEROUPPER
-	RET
-
-// func galMulAVX2(low, high, in, out []byte)
-TEXT ·galMulAVX2(SB), 7, $0
-	MOVQ  low+0(FP), SI     // SI: &low
-	MOVQ  high+24(FP), DX   // DX: &high
-	MOVQ  $15, BX           // BX: low mask
-	MOVQ  BX, X5
-	MOVOU (SI), X6          // X6: low
-	MOVOU (DX), X7          // X7: high
-	MOVQ  in_len+56(FP), R9 // R9: len(in)
-
-	VINSERTI128  $1, X6, Y6, Y6 // low
-	VINSERTI128  $1, X7, Y7, Y7 // high
-	VPBROADCASTB X5, Y8         // Y8: lomask (unpacked)
-
-	SHRQ  $5, R9         // len(in) / 32
-	MOVQ  out+72(FP), DX // DX: &out
-	MOVQ  in+48(FP), SI  // SI: &in
-	TESTQ R9, R9
-	JZ    done_avx2
-
-loopback_avx2:
-	VMOVDQU (SI), Y0
-	VPSRLQ  $4, Y0, Y1 // Y1: high input
-	VPAND   Y8, Y0, Y0 // Y0: low input
-	VPAND   Y8, Y1, Y1 // Y1: high input
-	VPSHUFB Y0, Y6, Y2 // Y2: mul low part
-	VPSHUFB Y1, Y7, Y3 // Y3: mul high part
-	VPXOR   Y3, Y2, Y4 // Y4: Result
-	VMOVDQU Y4, (DX)
-
-	ADDQ $32, SI       // in+=32
-	ADDQ $32, DX       // out+=32
-	SUBQ $1, R9
-	JNZ  loopback_avx2
-
-done_avx2:
-	VZEROUPPER
-	RET
-
-// func sSE2XorSlice(in, out []byte)
-TEXT ·sSE2XorSlice(SB), 7, $0
-	MOVQ in+0(FP), SI     // SI: &in
-	MOVQ in_len+8(FP), R9 // R9: len(in)
-	MOVQ out+24(FP), DX   // DX: &out
-	SHRQ $4, R9           // len(in) / 16
-	CMPQ R9, $0
-	JEQ  done_xor_sse2
-
-loopback_xor_sse2:
-	MOVOU (SI), X0          // in[x]
-	MOVOU (DX), X1          // out[x]
-	PXOR  X0, X1
-	MOVOU X1, (DX)
-	ADDQ  $16, SI           // in+=16
-	ADDQ  $16, DX           // out+=16
-	SUBQ  $1, R9
-	JNZ   loopback_xor_sse2
-
-done_xor_sse2:
-	RET

+ 0 - 48
vendor/github.com/klauspost/reedsolomon/galois_arm64.go

@@ -1,48 +0,0 @@
-//+build !noasm
-//+build !appengine
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
-// Copyright 2017, Minio, Inc.
-
-package reedsolomon
-
-//go:noescape
-func galMulNEON(c uint64, in, out []byte)
-
-//go:noescape
-func galMulXorNEON(c uint64, in, out []byte)
-
-func galMulSlice(c byte, in, out []byte, ssse3, avx2 bool) {
-	var done int
-	galMulNEON(uint64(c), in, out)
-	done = (len(in) >> 5) << 5
-
-	remain := len(in) - done
-	if remain > 0 {
-		mt := mulTable[c]
-		for i := done; i < len(in); i++ {
-			out[i] = mt[in[i]]
-		}
-	}
-}
-
-func galMulSliceXor(c byte, in, out []byte, ssse3, avx2 bool) {
-	var done int
-	galMulXorNEON(uint64(c), in, out)
-	done = (len(in) >> 5) << 5
-
-	remain := len(in) - done
-	if remain > 0 {
-		mt := mulTable[c]
-		for i := done; i < len(in); i++ {
-			out[i] ^= mt[in[i]]
-		}
-	}
-}
-
-// slice galois add
-func sliceXor(in, out []byte, sse2 bool) {
-	for n, input := range in {
-		out[n] ^= input
-	}
-}

+ 0 - 141
vendor/github.com/klauspost/reedsolomon/galois_arm64.s

@@ -1,141 +0,0 @@
-//+build !noasm !appengine
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
-// Copyright 2017, Minio, Inc.
-
-// Use github.com/minio/asm2plan9s on this file to assemble ARM instructions to
-// the opcodes of their Plan9 equivalents
-
-// polynomial multiplication
-#define POLYNOMIAL_MULTIPLICATION \
-	WORD $0x0e3ce340 \ // pmull  v0.8h,v26.8b,v28.8b
-	WORD $0x4e3ce346 \ // pmull2 v6.8h,v26.16b,v28.16b
-	WORD $0x0e3ce36c \ // pmull  v12.8h,v27.8b,v28.8b
-	WORD $0x4e3ce372 // pmull2 v18.8h,v27.16b,v28.16b
-
-// first reduction
-#define FIRST_REDUCTION \
-	WORD $0x0f088402 \ // shrn  v2.8b, v0.8h, #8
-	WORD $0x0f0884c8 \ // shrn  v8.8b, v6.8h, #8
-	WORD $0x0f08858e \ // shrn  v14.8b, v12.8h, #8
-	WORD $0x0f088654 \ // shrn  v20.8b, v18.8h, #8
-	WORD $0x0e22e3c3 \ // pmull v3.8h,v30.8b,v2.8b
-	WORD $0x0e28e3c9 \ // pmull v9.8h,v30.8b,v8.8b
-	WORD $0x0e2ee3cf \ // pmull v15.8h,v30.8b,v14.8b
-	WORD $0x0e34e3d5 \ // pmull v21.8h,v30.8b,v20.8b
-	WORD $0x6e201c60 \ // eor   v0.16b,v3.16b,v0.16b
-	WORD $0x6e261d26 \ // eor   v6.16b,v9.16b,v6.16b
-	WORD $0x6e2c1dec \ // eor   v12.16b,v15.16b,v12.16b
-	WORD $0x6e321eb2 // eor   v18.16b,v21.16b,v18.16b
-
-// second reduction
-#define SECOND_REDUCTION \
-	WORD $0x0f088404 \ // shrn  v4.8b, v0.8h, #8
-	WORD $0x0f0884ca \ // shrn  v10.8b, v6.8h, #8
-	WORD $0x0f088590 \ // shrn  v16.8b, v12.8h, #8
-	WORD $0x0f088656 \ // shrn  v22.8b, v18.8h, #8
-	WORD $0x6e241c44 \ // eor   v4.16b,v2.16b,v4.16b
-	WORD $0x6e2a1d0a \ // eor   v10.16b,v8.16b,v10.16b
-	WORD $0x6e301dd0 \ // eor   v16.16b,v14.16b,v16.16b
-	WORD $0x6e361e96 \ // eor   v22.16b,v20.16b,v22.16b
-	WORD $0x0e24e3c5 \ // pmull v5.8h,v30.8b,v4.8b
-	WORD $0x0e2ae3cb \ // pmull v11.8h,v30.8b,v10.8b
-	WORD $0x0e30e3d1 \ // pmull v17.8h,v30.8b,v16.8b
-	WORD $0x0e36e3d7 \ // pmull v23.8h,v30.8b,v22.8b
-	WORD $0x6e201ca0 \ // eor   v0.16b,v5.16b,v0.16b
-	WORD $0x6e261d61 \ // eor   v1.16b,v11.16b,v6.16b
-	WORD $0x6e2c1e22 \ // eor   v2.16b,v17.16b,v12.16b
-	WORD $0x6e321ee3 // eor   v3.16b,v23.16b,v18.16b
-
-// func galMulNEON(c uint64, in, out []byte)
-TEXT ·galMulNEON(SB), 7, $0
-	MOVD c+0(FP), R0
-	MOVD in_base+8(FP), R1
-	MOVD in_len+16(FP), R2   // length of message
-	MOVD out_base+32(FP), R5
-	SUBS $32, R2
-	BMI  complete
-
-	// Load constants table pointer
-	MOVD $·constants(SB), R3
-
-	// and load constants into v30 & v31
-	WORD $0x4c40a07e // ld1    {v30.16b-v31.16b}, [x3]
-
-	WORD $0x4e010c1c // dup    v28.16b, w0
-
-loop:
-	// Main loop
-	WORD $0x4cdfa83a // ld1   {v26.4s-v27.4s}, [x1], #32
-
-	POLYNOMIAL_MULTIPLICATION
-
-	FIRST_REDUCTION
-
-	SECOND_REDUCTION
-
-	// combine results
-	WORD $0x4e1f2000 // tbl v0.16b,{v0.16b,v1.16b},v31.16b
-	WORD $0x4e1f2041 // tbl v1.16b,{v2.16b,v3.16b},v31.16b
-
-	// Store result
-	WORD $0x4c9faca0 // st1    {v0.2d-v1.2d}, [x5], #32
-
-	SUBS $32, R2
-	BPL  loop
-
-complete:
-	RET
-
-// func galMulXorNEON(c uint64, in, out []byte)
-TEXT ·galMulXorNEON(SB), 7, $0
-	MOVD c+0(FP), R0
-	MOVD in_base+8(FP), R1
-	MOVD in_len+16(FP), R2   // length of message
-	MOVD out_base+32(FP), R5
-	SUBS $32, R2
-	BMI  completeXor
-
-	// Load constants table pointer
-	MOVD $·constants(SB), R3
-
-	// and load constants into v30 & v31
-	WORD $0x4c40a07e // ld1    {v30.16b-v31.16b}, [x3]
-
-	WORD $0x4e010c1c // dup    v28.16b, w0
-
-loopXor:
-	// Main loop
-	WORD $0x4cdfa83a // ld1   {v26.4s-v27.4s}, [x1], #32
-	WORD $0x4c40a8b8 // ld1   {v24.4s-v25.4s}, [x5]
-
-	POLYNOMIAL_MULTIPLICATION
-
-	FIRST_REDUCTION
-
-	SECOND_REDUCTION
-
-	// combine results
-	WORD $0x4e1f2000 // tbl v0.16b,{v0.16b,v1.16b},v31.16b
-	WORD $0x4e1f2041 // tbl v1.16b,{v2.16b,v3.16b},v31.16b
-
-	// Xor result and store
-	WORD $0x6e381c00 // eor v0.16b,v0.16b,v24.16b
-	WORD $0x6e391c21 // eor v1.16b,v1.16b,v25.16b
-	WORD $0x4c9faca0 // st1   {v0.2d-v1.2d}, [x5], #32
-
-	SUBS $32, R2
-	BPL  loopXor
-
-completeXor:
-	RET
-
-// Constants table
-//   generating polynomial is 29 (= 0x1d)
-DATA ·constants+0x0(SB)/8, $0x1d1d1d1d1d1d1d1d
-DATA ·constants+0x8(SB)/8, $0x1d1d1d1d1d1d1d1d
-//   constant for TBL instruction
-DATA ·constants+0x10(SB)/8, $0x0e0c0a0806040200
-DATA ·constants+0x18(SB)/8, $0x1e1c1a1816141210
-
-GLOBL ·constants(SB), 8, $32

+ 0 - 27
vendor/github.com/klauspost/reedsolomon/galois_noasm.go

@@ -1,27 +0,0 @@
-//+build !amd64 noasm appengine
-//+build !arm64 noasm appengine
-
-// Copyright 2015, Klaus Post, see LICENSE for details.
-
-package reedsolomon
-
-func galMulSlice(c byte, in, out []byte, ssse3, avx2 bool) {
-	mt := mulTable[c]
-	for n, input := range in {
-		out[n] = mt[input]
-	}
-}
-
-func galMulSliceXor(c byte, in, out []byte, ssse3, avx2 bool) {
-	mt := mulTable[c]
-	for n, input := range in {
-		out[n] ^= mt[input]
-	}
-}
-
-// slice galois add
-func sliceXor(in, out []byte, sse2 bool) {
-	for n, input := range in {
-		out[n] ^= input
-	}
-}

+ 0 - 132
vendor/github.com/klauspost/reedsolomon/gentables.go

@@ -1,132 +0,0 @@
-//+build ignore
-
-package main
-
-import (
-	"fmt"
-)
-
-var logTable = [fieldSize]int16{
-	-1, 0, 1, 25, 2, 50, 26, 198,
-	3, 223, 51, 238, 27, 104, 199, 75,
-	4, 100, 224, 14, 52, 141, 239, 129,
-	28, 193, 105, 248, 200, 8, 76, 113,
-	5, 138, 101, 47, 225, 36, 15, 33,
-	53, 147, 142, 218, 240, 18, 130, 69,
-	29, 181, 194, 125, 106, 39, 249, 185,
-	201, 154, 9, 120, 77, 228, 114, 166,
-	6, 191, 139, 98, 102, 221, 48, 253,
-	226, 152, 37, 179, 16, 145, 34, 136,
-	54, 208, 148, 206, 143, 150, 219, 189,
-	241, 210, 19, 92, 131, 56, 70, 64,
-	30, 66, 182, 163, 195, 72, 126, 110,
-	107, 58, 40, 84, 250, 133, 186, 61,
-	202, 94, 155, 159, 10, 21, 121, 43,
-	78, 212, 229, 172, 115, 243, 167, 87,
-	7, 112, 192, 247, 140, 128, 99, 13,
-	103, 74, 222, 237, 49, 197, 254, 24,
-	227, 165, 153, 119, 38, 184, 180, 124,
-	17, 68, 146, 217, 35, 32, 137, 46,
-	55, 63, 209, 91, 149, 188, 207, 205,
-	144, 135, 151, 178, 220, 252, 190, 97,
-	242, 86, 211, 171, 20, 42, 93, 158,
-	132, 60, 57, 83, 71, 109, 65, 162,
-	31, 45, 67, 216, 183, 123, 164, 118,
-	196, 23, 73, 236, 127, 12, 111, 246,
-	108, 161, 59, 82, 41, 157, 85, 170,
-	251, 96, 134, 177, 187, 204, 62, 90,
-	203, 89, 95, 176, 156, 169, 160, 81,
-	11, 245, 22, 235, 122, 117, 44, 215,
-	79, 174, 213, 233, 230, 231, 173, 232,
-	116, 214, 244, 234, 168, 80, 88, 175,
-}
-
-const (
-	// The number of elements in the field.
-	fieldSize = 256
-
-	// The polynomial used to generate the logarithm table.
-	//
-	// There are a number of polynomials that work to generate
-	// a Galois field of 256 elements.  The choice is arbitrary,
-	// and we just use the first one.
-	//
-	// The possibilities are: 29, 43, 45, 77, 95, 99, 101, 105,
-	//   113, 135, 141, 169, 195, 207, 231, and 245.
-	generatingPolynomial = 29
-)
-
-func main() {
-	t := generateExpTable()
-	fmt.Printf("var expTable = %#v\n", t)
-	//t2 := generateMulTableSplit(t)
-	//fmt.Printf("var mulTable = %#v\n", t2)
-	low, high := generateMulTableHalf(t)
-	fmt.Printf("var mulTableLow = %#v\n", low)
-	fmt.Printf("var mulTableHigh = %#v\n", high)
-}
-
-/**
- * Generates the inverse log table.
- */
-func generateExpTable() []byte {
-	result := make([]byte, fieldSize*2-2)
-	for i := 1; i < fieldSize; i++ {
-		log := logTable[i]
-		result[log] = byte(i)
-		result[log+fieldSize-1] = byte(i)
-	}
-	return result
-}
-
-func generateMulTable(expTable []byte) []byte {
-	result := make([]byte, 256*256)
-	for v := range result {
-		a := byte(v & 0xff)
-		b := byte(v >> 8)
-		if a == 0 || b == 0 {
-			result[v] = 0
-			continue
-		}
-		logA := int(logTable[a])
-		logB := int(logTable[b])
-		result[v] = expTable[logA+logB]
-	}
-	return result
-}
-
-func generateMulTableSplit(expTable []byte) [256][256]byte {
-	var result [256][256]byte
-	for a := range result {
-		for b := range result[a] {
-			if a == 0 || b == 0 {
-				result[a][b] = 0
-				continue
-			}
-			logA := int(logTable[a])
-			logB := int(logTable[b])
-			result[a][b] = expTable[logA+logB]
-		}
-	}
-	return result
-}
-
-func generateMulTableHalf(expTable []byte) (low [256][16]byte, high [256][16]byte) {
-	for a := range low {
-		for b := range low {
-			result := 0
-			if !(a == 0 || b == 0) {
-				logA := int(logTable[a])
-				logB := int(logTable[b])
-				result = int(expTable[logA+logB])
-			}
-			if (b & 0xf) == b {
-				low[a][b] = byte(result)
-			}
-			if (b & 0xf0) == b {
-				high[a][b>>4] = byte(result)
-			}
-		}
-	}
-	return
-}
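
One detail worth spelling out: generateExpTable returns a double-length table precisely so that a multiply can index it with the raw sum of two logs (at most 254 + 254) instead of reducing mod 255. A hedged sketch of GF(2^8) multiplication in terms of logTable and such a generated table (gfMul is an illustrative helper, not part of the package):

    // gfMul multiplies two elements of GF(2^8) via table lookups.
    // logTable is the table above; expTable is the double-length slice
    // produced by generateExpTable, so the summed logs index it directly
    // with no mod 255 step.
    func gfMul(a, b byte) byte {
        if a == 0 || b == 0 {
            return 0 // zero has no logarithm; the product is zero
        }
        return expTable[int(logTable[a])+int(logTable[b])]
    }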

+ 0 - 160
vendor/github.com/klauspost/reedsolomon/inversion_tree.go

@@ -1,160 +0,0 @@
-/**
- * A thread-safe tree which caches inverted matrices.
- *
- * Copyright 2016, Peter Collins
- */
-
-package reedsolomon
-
-import (
-	"errors"
-	"sync"
-)
-
-// The tree uses a Reader-Writer mutex to make it thread-safe
-// when accessing cached matrices and inserting new ones.
-type inversionTree struct {
-	mutex *sync.RWMutex
-	root  inversionNode
-}
-
-type inversionNode struct {
-	matrix   matrix
-	children []*inversionNode
-}
-
-// newInversionTree initializes a tree for storing inverted matrices.
-// Note that the root node is the identity matrix as it implies
-// there were no errors with the original data.
-func newInversionTree(dataShards, parityShards int) inversionTree {
-	identity, _ := identityMatrix(dataShards)
-	root := inversionNode{
-		matrix:   identity,
-		children: make([]*inversionNode, dataShards+parityShards),
-	}
-	return inversionTree{
-		mutex: &sync.RWMutex{},
-		root:  root,
-	}
-}
-
-// GetInvertedMatrix returns the cached inverted matrix or nil if it
-// is not found in the tree keyed on the indices of invalid rows.
-func (t inversionTree) GetInvertedMatrix(invalidIndices []int) matrix {
-	// Lock the tree for reading before accessing the tree.
-	t.mutex.RLock()
-	defer t.mutex.RUnlock()
-
-	// If no invalid indices were given, we should return the root
-	// identity matrix.
-	if len(invalidIndices) == 0 {
-		return t.root.matrix
-	}
-
-	// Recursively search for the inverted matrix in the tree, passing in
-	// 0 as the parent index as we start at the root of the tree.
-	return t.root.getInvertedMatrix(invalidIndices, 0)
-}
-
-// errAlreadySet is returned if the root node matrix is overwritten
-var errAlreadySet = errors.New("the root node identity matrix is already set")
-
-// InsertInvertedMatrix inserts a new inverted matrix into the tree
-// keyed by the indices of invalid rows.  The total number of shards
-// is required for creating the proper length lists of child nodes for
-// each node.
-func (t inversionTree) InsertInvertedMatrix(invalidIndices []int, matrix matrix, shards int) error {
-	// If no invalid indices were given then we are done because the
-	// root node is already set with the identity matrix.
-	if len(invalidIndices) == 0 {
-		return errAlreadySet
-	}
-
-	if !matrix.IsSquare() {
-		return errNotSquare
-	}
-
-	// Lock the tree for writing and reading before accessing the tree.
-	t.mutex.Lock()
-	defer t.mutex.Unlock()
-
-	// Recursively create nodes for the inverted matrix in the tree until
-	// we reach the node to insert the matrix to.  We start by passing in
-	// 0 as the parent index as we start at the root of the tree.
-	t.root.insertInvertedMatrix(invalidIndices, matrix, shards, 0)
-
-	return nil
-}
-
-func (n inversionNode) getInvertedMatrix(invalidIndices []int, parent int) matrix {
-	// Get the child node to search next from the list of children.  The
-	// list of children starts relative to the parent index passed in
-	// because the indices of invalid rows are sorted (by default).  As we
-	// search recursively, the first invalid index gets popped off the list,
-	// so when searching through the list of children, use that first invalid
-	// index to find the child node.
-	firstIndex := invalidIndices[0]
-	node := n.children[firstIndex-parent]
-
-	// If the child node doesn't exist in the list yet, fail fast by
-	// returning, so we can construct and insert the proper inverted matrix.
-	if node == nil {
-		return nil
-	}
-
-	// If there's more than one invalid index left in the list we should
-	// keep searching recursively.
-	if len(invalidIndices) > 1 {
-		// Search recursively on the child node by passing in the invalid indices
-		// with the first index popped off the front.  Also the parent index to
-		// pass down is the first index plus one.
-		return node.getInvertedMatrix(invalidIndices[1:], firstIndex+1)
-	}
-	// If there aren't any more invalid indices to search, we've found our
-	// node.  Return it, however keep in mind that the matrix could still be
-	// nil because intermediary nodes in the tree are created sometimes with
-	// their inversion matrices uninitialized.
-	return node.matrix
-}
-
-func (n inversionNode) insertInvertedMatrix(invalidIndices []int, matrix matrix, shards, parent int) {
-	// As above, get the child node to search next from the list of children.
-	// The list of children starts relative to the parent index passed in
-	// because the indices of invalid rows are sorted (by default).  As we
-	// search recursively, the first invalid index gets popped off the list,
-	// so when searching through the list of children, use that first invalid
-	// index to find the child node.
-	firstIndex := invalidIndices[0]
-	node := n.children[firstIndex-parent]
-
-	// If the child node doesn't exist in the list yet, create a new
-	// node because we have the writer lock and add it to the list
-	// of children.
-	if node == nil {
-		// Make the length of the list of children equal to the number
-		// of shards minus the first invalid index because the list of
-		// invalid indices is sorted, so only this length of errors
-		// are possible in the tree.
-		node = &inversionNode{
-			children: make([]*inversionNode, shards-firstIndex),
-		}
-		// Insert the new node into the tree at the first index relative
-		// to the parent index that was given in this recursive call.
-		n.children[firstIndex-parent] = node
-	}
-
-	// If there's more than one invalid index left in the list we should
-	// keep searching recursively in order to find the node to add our
-	// matrix.
-	if len(invalidIndices) > 1 {
-		// As above, search recursively on the child node by passing in
-		// the invalid indices with the first index popped off the front.
-		// Also the total number of shards and parent index are passed down
-		// which is equal to the first index plus one.
-		node.insertInvertedMatrix(invalidIndices[1:], matrix, shards, firstIndex+1)
-	} else {
-		// If there aren't any more invalid indices to search, we've found our
-		// node.  Cache the inverted matrix in this node.
-		node.matrix = matrix
-	}
-}
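
The tree keys each cached inversion on the sorted list of missing shard indices, so a repeated failure of the same shards skips the Gaussian elimination entirely. A sketch of the lookup-then-insert flow that reconstruct() performs; it uses the unexported API, so it would only compile inside package reedsolomon, and an identity matrix stands in for the real inverted submatrix:

func exampleInversionCache() {
	tree := newInversionTree(4, 2) // 4 data + 2 parity shards

	invalid := []int{1, 4} // shards 1 and 4 are missing (kept sorted)
	if tree.GetInvertedMatrix(invalid) == nil {
		// Cache miss: the caller inverts the surviving submatrix here.
		// An identity matrix is a stand-in for that result.
		m, _ := identityMatrix(4)
		_ = tree.InsertInvertedMatrix(invalid, m, 6)
	}

	// A later failure of the same two shards now hits the cache.
	cached := tree.GetInvertedMatrix(invalid) // non-nil
	_ = cached
}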

+ 0 - 279
vendor/github.com/klauspost/reedsolomon/matrix.go

@@ -1,279 +0,0 @@
-/**
- * Matrix Algebra over an 8-bit Galois Field
- *
- * Copyright 2015, Klaus Post
- * Copyright 2015, Backblaze, Inc.
- */
-
-package reedsolomon
-
-import (
-	"errors"
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-// byte[row][col]
-type matrix [][]byte
-
-// newMatrix returns a matrix of zeros.
-func newMatrix(rows, cols int) (matrix, error) {
-	if rows <= 0 {
-		return nil, errInvalidRowSize
-	}
-	if cols <= 0 {
-		return nil, errInvalidColSize
-	}
-
-	m := matrix(make([][]byte, rows))
-	for i := range m {
-		m[i] = make([]byte, cols)
-	}
-	return m, nil
-}
-
-// newMatrixData initializes a matrix with the given row-major data.
-// Note that data is not copied from input.
-func newMatrixData(data [][]byte) (matrix, error) {
-	m := matrix(data)
-	err := m.Check()
-	if err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-// identityMatrix returns an identity matrix of the given size.
-func identityMatrix(size int) (matrix, error) {
-	m, err := newMatrix(size, size)
-	if err != nil {
-		return nil, err
-	}
-	for i := range m {
-		m[i][i] = 1
-	}
-	return m, nil
-}
-
-// errInvalidRowSize will be returned if attempting to create a matrix with a negative or zero number of rows.
-var errInvalidRowSize = errors.New("invalid row size")
-
-// errInvalidColSize will be returned if attempting to create a matrix with a negative or zero number of columns.
-var errInvalidColSize = errors.New("invalid column size")
-
-// errColSizeMismatch is returned if the column size is not the same for all rows.
-var errColSizeMismatch = errors.New("column size is not the same for all rows")
-
-func (m matrix) Check() error {
-	rows := len(m)
-	if rows <= 0 {
-		return errInvalidRowSize
-	}
-	cols := len(m[0])
-	if cols <= 0 {
-		return errInvalidColSize
-	}
-
-	for _, col := range m {
-		if len(col) != cols {
-			return errColSizeMismatch
-		}
-	}
-	return nil
-}
-
-// String returns a human-readable string of the matrix contents.
-//
-// Example: [[1, 2], [3, 4]]
-func (m matrix) String() string {
-	rowOut := make([]string, 0, len(m))
-	for _, row := range m {
-		colOut := make([]string, 0, len(row))
-		for _, col := range row {
-			colOut = append(colOut, strconv.Itoa(int(col)))
-		}
-		rowOut = append(rowOut, "["+strings.Join(colOut, ", ")+"]")
-	}
-	return "[" + strings.Join(rowOut, ", ") + "]"
-}
-
-// Multiply multiplies this matrix (the one on the left) by another
-// matrix (the one on the right) and returns a new matrix with the result.
-func (m matrix) Multiply(right matrix) (matrix, error) {
-	if len(m[0]) != len(right) {
-		return nil, fmt.Errorf("column count on left (%d) does not match row count on right (%d)", len(m[0]), len(right))
-	}
-	result, _ := newMatrix(len(m), len(right[0]))
-	for r, row := range result {
-		for c := range row {
-			var value byte
-			for i := range m[0] {
-				value ^= galMultiply(m[r][i], right[i][c])
-			}
-			result[r][c] = value
-		}
-	}
-	return result, nil
-}
-
-// Augment returns the concatenation of this matrix and the matrix on the right.
-func (m matrix) Augment(right matrix) (matrix, error) {
-	if len(m) != len(right) {
-		return nil, errMatrixSize
-	}
-
-	result, _ := newMatrix(len(m), len(m[0])+len(right[0]))
-	for r, row := range m {
-		for c := range row {
-			result[r][c] = m[r][c]
-		}
-		cols := len(m[0])
-		for c := range right[0] {
-			result[r][cols+c] = right[r][c]
-		}
-	}
-	return result, nil
-}
-
-// errMatrixSize is returned if matrix dimensions don't match.
-var errMatrixSize = errors.New("matrix sizes do not match")
-
-func (m matrix) SameSize(n matrix) error {
-	if len(m) != len(n) {
-		return errMatrixSize
-	}
-	for i := range m {
-		if len(m[i]) != len(n[i]) {
-			return errMatrixSize
-		}
-	}
-	return nil
-}
-
-// SubMatrix returns a part of this matrix. Data is copied.
-func (m matrix) SubMatrix(rmin, cmin, rmax, cmax int) (matrix, error) {
-	result, err := newMatrix(rmax-rmin, cmax-cmin)
-	if err != nil {
-		return nil, err
-	}
-	// OPTME: If used heavily, use copy function to copy slice
-	for r := rmin; r < rmax; r++ {
-		for c := cmin; c < cmax; c++ {
-			result[r-rmin][c-cmin] = m[r][c]
-		}
-	}
-	return result, nil
-}
-
-// SwapRows exchanges two rows in the matrix.
-func (m matrix) SwapRows(r1, r2 int) error {
-	if r1 < 0 || len(m) <= r1 || r2 < 0 || len(m) <= r2 {
-		return errInvalidRowSize
-	}
-	m[r2], m[r1] = m[r1], m[r2]
-	return nil
-}
-
-// IsSquare will return true if the matrix is square,
-// and false otherwise.
-func (m matrix) IsSquare() bool {
-	return len(m) == len(m[0])
-}
-
-// errSingular is returned if the matrix is singular and cannot be inverted
-var errSingular = errors.New("matrix is singular")
-
-// errNotSquare is returned if attempting to inverse a non-square matrix.
-var errNotSquare = errors.New("only square matrices can be inverted")
-
-// Invert returns the inverse of this matrix.
-// Returns errSingular when the matrix is singular and doesn't have an inverse.
-// The matrix must be square, otherwise errNotSquare is returned.
-func (m matrix) Invert() (matrix, error) {
-	if !m.IsSquare() {
-		return nil, errNotSquare
-	}
-
-	size := len(m)
-	work, _ := identityMatrix(size)
-	work, _ = m.Augment(work)
-
-	err := work.gaussianElimination()
-	if err != nil {
-		return nil, err
-	}
-
-	return work.SubMatrix(0, size, size, size*2)
-}
-
-func (m matrix) gaussianElimination() error {
-	rows := len(m)
-	columns := len(m[0])
-	// Clear out the part below the main diagonal and scale the main
-	// diagonal to be 1.
-	for r := 0; r < rows; r++ {
-		// If the element on the diagonal is 0, find a row below
-		// that has a non-zero and swap them.
-		if m[r][r] == 0 {
-			for rowBelow := r + 1; rowBelow < rows; rowBelow++ {
-				if m[rowBelow][r] != 0 {
-					m.SwapRows(r, rowBelow)
-					break
-				}
-			}
-		}
-		// If we couldn't find one, the matrix is singular.
-		if m[r][r] == 0 {
-			return errSingular
-		}
-		// Scale to 1.
-		if m[r][r] != 1 {
-			scale := galDivide(1, m[r][r])
-			for c := 0; c < columns; c++ {
-				m[r][c] = galMultiply(m[r][c], scale)
-			}
-		}
-		// Make everything below the 1 be a 0 by subtracting
-		// a multiple of it.  (Subtraction and addition are
-		// both exclusive or in the Galois field.)
-		for rowBelow := r + 1; rowBelow < rows; rowBelow++ {
-			if m[rowBelow][r] != 0 {
-				scale := m[rowBelow][r]
-				for c := 0; c < columns; c++ {
-					m[rowBelow][c] ^= galMultiply(scale, m[r][c])
-				}
-			}
-		}
-	}
-
-	// Now clear the part above the main diagonal.
-	for d := 0; d < rows; d++ {
-		for rowAbove := 0; rowAbove < d; rowAbove++ {
-			if m[rowAbove][d] != 0 {
-				scale := m[rowAbove][d]
-				for c := 0; c < columns; c++ {
-					m[rowAbove][c] ^= galMultiply(scale, m[d][c])
-				}
-
-			}
-		}
-	}
-	return nil
-}
-
-// Create a Vandermonde matrix, which is guaranteed to have the
-// property that any subset of rows that forms a square matrix
-// is invertible.
-func vandermonde(rows, cols int) (matrix, error) {
-	result, err := newMatrix(rows, cols)
-	if err != nil {
-		return nil, err
-	}
-	for r, row := range result {
-		for c := range row {
-			result[r][c] = galExp(byte(r), c)
-		}
-	}
-	return result, nil
-}
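
Since every step of gaussianElimination is exact in GF(2^8) (XOR for addition and subtraction, table lookups for multiplication and division), Invert followed by Multiply reproduces the identity exactly, with none of the rounding concerns of floating-point elimination. A small sketch using the unexported API from inside the package (assumes fmt is imported):

func exampleInvertRoundTrip() {
	m, _ := newMatrixData([][]byte{
		{1, 2},
		{3, 4},
	})
	inv, err := m.Invert() // errSingular if no inverse exists
	if err != nil {
		panic(err)
	}
	prod, _ := m.Multiply(inv)
	id, _ := identityMatrix(2)
	fmt.Println(prod.String() == id.String()) // prints: true
}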

+ 0 - 111
vendor/github.com/klauspost/reedsolomon/options.go

@@ -1,111 +0,0 @@
-package reedsolomon
-
-import (
-	"runtime"
-
-	"github.com/klauspost/cpuid"
-)
-
-// Option allows overriding processing parameters.
-type Option func(*options)
-
-type options struct {
-	maxGoroutines              int
-	minSplitSize               int
-	useAVX2, useSSSE3, useSSE2 bool
-	usePAR1Matrix              bool
-	useCauchy                  bool
-	shardSize                  int
-}
-
-var defaultOptions = options{
-	maxGoroutines: 384,
-	minSplitSize:  1024,
-}
-
-func init() {
-	if runtime.GOMAXPROCS(0) <= 1 {
-		defaultOptions.maxGoroutines = 1
-	}
-	// Detect CPU capabilities.
-	defaultOptions.useSSSE3 = cpuid.CPU.SSSE3()
-	defaultOptions.useAVX2 = cpuid.CPU.AVX2()
-	defaultOptions.useSSE2 = cpuid.CPU.SSE2()
-}
-
-// WithMaxGoroutines sets the maximum number of goroutines to use for encoding & decoding.
-// Jobs will be split into this many parts, unless each goroutine would have to process
-// less than minSplitSize bytes (set with WithMinSplitSize).
-// For the best speed, keep this well above the GOMAXPROCS number for more fine-grained
-// scheduling.
-// If n <= 0, it is ignored.
-func WithMaxGoroutines(n int) Option {
-	return func(o *options) {
-		if n > 0 {
-			o.maxGoroutines = n
-		}
-	}
-}
-
-// WithAutoGoroutines will adjust the number of goroutines for optimal speed with a
-// specific shard size.
-// Send in the shard size you expect to send. Other shard sizes will work, but may not
-// run at the optimal speed.
-// Overrides WithMaxGoroutines.
-// If shardSize <= 0, it is ignored.
-func WithAutoGoroutines(shardSize int) Option {
-	return func(o *options) {
-		o.shardSize = shardSize
-	}
-}
-
-// WithMinSplitSize sets the minimum encoding size in bytes per goroutine.
-// See WithMaxGoroutines on how jobs are split.
-// If n <= 0, it is ignored.
-func WithMinSplitSize(n int) Option {
-	return func(o *options) {
-		if n > 0 {
-			o.minSplitSize = n
-		}
-	}
-}
-
-func withSSE3(enabled bool) Option {
-	return func(o *options) {
-		o.useSSSE3 = enabled
-	}
-}
-
-func withAVX2(enabled bool) Option {
-	return func(o *options) {
-		o.useAVX2 = enabled
-	}
-}
-
-func withSSE2(enabled bool) Option {
-	return func(o *options) {
-		o.useSSE2 = enabled
-	}
-}
-
-// WithPAR1Matrix causes the encoder to build the matrix the way PARv1
-// does. Note that the method they use is buggy, and may lead to cases
-// where recovery is impossible, even if there are enough parity
-// shards.
-func WithPAR1Matrix() Option {
-	return func(o *options) {
-		o.usePAR1Matrix = true
-		o.useCauchy = false
-	}
-}
-
-// WithCauchyMatrix will make the encoder build a Cauchy style matrix.
-// The output of this is not compatible with the standard output.
-// A Cauchy matrix is faster to generate. This does not affect data throughput,
-// but will result in slightly faster start-up time.
-func WithCauchyMatrix() Option {
-	return func(o *options) {
-		o.useCauchy = true
-		o.usePAR1Matrix = false
-	}
-}
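
The options compose at construction time; each Option is just a function mutating the private options struct, and later options overwrite earlier ones. A usage sketch against the exported API; the shard counts and sizes are illustrative, not recommendations:

package main

import (
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// 10 data + 3 parity shards, capped goroutines, a larger split size,
	// and a Cauchy matrix instead of the default Vandermonde-derived one.
	enc, err := reedsolomon.New(10, 3,
		reedsolomon.WithMaxGoroutines(8),
		reedsolomon.WithMinSplitSize(64<<10),
		reedsolomon.WithCauchyMatrix(),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = enc // use enc.Encode / enc.Reconstruct as usual
}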

+ 0 - 884
vendor/github.com/klauspost/reedsolomon/reedsolomon.go

@@ -1,884 +0,0 @@
-/**
- * Reed-Solomon Coding over 8-bit values.
- *
- * Copyright 2015, Klaus Post
- * Copyright 2015, Backblaze, Inc.
- */
-
-// Package reedsolomon enables Erasure Coding in Go
-//
-// For usage and examples, see https://github.com/klauspost/reedsolomon
-//
-package reedsolomon
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"runtime"
-	"sync"
-
-	"github.com/klauspost/cpuid"
-)
-
-// Encoder is an interface to encode Reed-Solomon parity sets for your data.
-type Encoder interface {
-	// Encodes parity for a set of data shards.
-	// Input is 'shards' containing data shards followed by parity shards.
-	// The number of shards must match the number given to New().
-	// Each shard is a byte array, and they must all be the same size.
-	// The parity shards will always be overwritten and the data shards
-	// will remain the same, so it is safe for you to read from the
-	// data shards while this is running.
-	Encode(shards [][]byte) error
-
-	// Verify returns true if the parity shards contain correct data.
-	// The data is the same format as Encode. No data is modified, so
-	// you are allowed to read from data while this is running.
-	Verify(shards [][]byte) (bool, error)
-
-	// Reconstruct will recreate the missing shards if possible.
-	//
-	// Given a list of shards, some of which contain data, fills in the
-	// ones that don't have data.
-	//
-	// The length of the array must be equal to the total number of shards.
-	// You indicate that a shard is missing by setting it to nil or zero-length.
-	// If a shard is zero-length but has sufficient capacity, that memory will
-	// be used, otherwise a new []byte will be allocated.
-	//
-	// If there are too few shards to reconstruct the missing
-	// ones, ErrTooFewShards will be returned.
-	//
-	// The reconstructed shard set is complete, but integrity is not verified.
-	// Use the Verify function to check if the data set is ok.
-	Reconstruct(shards [][]byte) error
-
-	// ReconstructData will recreate any missing data shards, if possible.
-	//
-	// Given a list of shards, some of which contain data, fills in the
-	// data shards that don't have data.
-	//
-	// The length of the array must be equal to Shards.
-	// You indicate that a shard is missing by setting it to nil or zero-length.
-	// If a shard is zero-length but has sufficient capacity, that memory will
-	// be used, otherwise a new []byte will be allocated.
-	//
-	// If there are too few shards to reconstruct the missing
-	// ones, ErrTooFewShards will be returned.
-	//
-	// As the reconstructed shard set may contain missing parity shards,
-	// calling the Verify function is likely to fail.
-	ReconstructData(shards [][]byte) error
-
-	// Update is used to change a few data shards and update their parity.
-	// Input 'newDatashards' contains the changed data shards.
-	// Input 'shards' contains the old data shards (nil where a data shard is
-	// unchanged) and the old parity shards. The new parity shards will be
-	// written to shards[DataShards:]. Update is useful when DataShards is much
-	// larger than ParityShards and only a few data shards have changed; it is
-	// then faster than Encode and does not need to read all data shards.
-	Update(shards [][]byte, newDatashards [][]byte) error
-
-	// Split a data slice into the number of shards given to the encoder,
-	// and create empty parity shards.
-	//
-	// The data will be split into equally sized shards.
-	// If the data size isn't divisible by the number of shards,
-	// the last shard will contain extra zeros.
-	//
-	// There must be at least 1 byte otherwise ErrShortData will be
-	// returned.
-	//
-	// The data will not be copied, except for the last shard, so you
-	// should not modify the data of the input slice afterwards.
-	Split(data []byte) ([][]byte, error)
-
-	// Join the shards and write the data segment to dst.
-	//
-	// Only the data shards are considered.
-	// You must supply the exact output size you want.
-	// If there are too few shards given, ErrTooFewShards will be returned.
-	// If the total data size is less than outSize, ErrShortData will be returned.
-	Join(dst io.Writer, shards [][]byte, outSize int) error
-}
-
-// reedSolomon contains a matrix for a specific
-// distribution of datashards and parity shards.
-// Construct it using New().
-type reedSolomon struct {
-	DataShards   int // Number of data shards, should not be modified.
-	ParityShards int // Number of parity shards, should not be modified.
-	Shards       int // Total number of shards. Calculated, and should not be modified.
-	m            matrix
-	tree         inversionTree
-	parity       [][]byte
-	o            options
-}
-
-// ErrInvShardNum will be returned by New, if you attempt to create
-// an Encoder where the number of data or parity shards is zero or less.
-var ErrInvShardNum = errors.New("cannot create Encoder with zero or less data/parity shards")
-
-// ErrMaxShardNum will be returned by New, if you attempt to create an
-// Encoder where the sum of data and parity shards is bigger than the
-// order of GF(2^8), i.e. 256.
-var ErrMaxShardNum = errors.New("cannot create Encoder with more than 256 data+parity shards")
-
-// buildMatrix creates the matrix to use for encoding, given the
-// number of data shards and the number of total shards.
-//
-// The top square of the matrix is guaranteed to be an identity
-// matrix, which means that the data shards are unchanged after
-// encoding.
-func buildMatrix(dataShards, totalShards int) (matrix, error) {
-	// Start with a Vandermonde matrix.  This matrix would work,
-	// in theory, but doesn't have the property that the data
-	// shards are unchanged after encoding.
-	vm, err := vandermonde(totalShards, dataShards)
-	if err != nil {
-		return nil, err
-	}
-
-	// Multiply by the inverse of the top square of the matrix.
-	// This will make the top square be the identity matrix, but
-	// preserve the property that any square subset of rows is
-	// invertible.
-	top, err := vm.SubMatrix(0, 0, dataShards, dataShards)
-	if err != nil {
-		return nil, err
-	}
-
-	topInv, err := top.Invert()
-	if err != nil {
-		return nil, err
-	}
-
-	return vm.Multiply(topInv)
-}
-
-// buildMatrixPAR1 creates the matrix to use for encoding according to
-// the PARv1 spec, given the number of data shards and the number of
-// total shards. Note that the method they use is buggy, and may lead
-// to cases where recovery is impossible, even if there are enough
-// parity shards.
-//
-// The top square of the matrix is guaranteed to be an identity
-// matrix, which means that the data shards are unchanged after
-// encoding.
-func buildMatrixPAR1(dataShards, totalShards int) (matrix, error) {
-	result, err := newMatrix(totalShards, dataShards)
-	if err != nil {
-		return nil, err
-	}
-
-	for r, row := range result {
-		// The top portion of the matrix is the identity
-		// matrix, and the bottom is a transposed Vandermonde
-		// matrix starting at 1 instead of 0.
-		if r < dataShards {
-			result[r][r] = 1
-		} else {
-			for c := range row {
-				result[r][c] = galExp(byte(c+1), r-dataShards)
-			}
-		}
-	}
-	return result, nil
-}
-
-func buildMatrixCauchy(dataShards, totalShards int) (matrix, error) {
-	result, err := newMatrix(totalShards, dataShards)
-	if err != nil {
-		return nil, err
-	}
-
-	for r, row := range result {
-		// The top portion of the matrix is the identity
-		// matrix, and the bottom is a transposed Cauchy matrix.
-		if r < dataShards {
-			result[r][r] = 1
-		} else {
-			for c := range row {
-				result[r][c] = invTable[(byte(r ^ c))]
-			}
-		}
-	}
-	return result, nil
-}
-
-// New creates a new encoder and initializes it to
-// the number of data shards and parity shards that
-// you want to use. You can reuse this encoder.
-// Note that the maximum number of total shards is 256.
-// If no options are supplied, default options are used.
-func New(dataShards, parityShards int, opts ...Option) (Encoder, error) {
-	r := reedSolomon{
-		DataShards:   dataShards,
-		ParityShards: parityShards,
-		Shards:       dataShards + parityShards,
-		o:            defaultOptions,
-	}
-
-	for _, opt := range opts {
-		opt(&r.o)
-	}
-	if dataShards <= 0 || parityShards <= 0 {
-		return nil, ErrInvShardNum
-	}
-
-	if dataShards+parityShards > 256 {
-		return nil, ErrMaxShardNum
-	}
-
-	var err error
-	switch {
-	case r.o.useCauchy:
-		r.m, err = buildMatrixCauchy(dataShards, r.Shards)
-	case r.o.usePAR1Matrix:
-		r.m, err = buildMatrixPAR1(dataShards, r.Shards)
-	default:
-		r.m, err = buildMatrix(dataShards, r.Shards)
-	}
-	if err != nil {
-		return nil, err
-	}
-	if r.o.shardSize > 0 {
-		cacheSize := cpuid.CPU.Cache.L2
-		if cacheSize <= 0 {
-			// Set to 128K if undetectable.
-			cacheSize = 128 << 10
-		}
-		p := runtime.NumCPU()
-
-		// 1 input + parity must fit in cache, and we add one more to be safer.
-		shards := 1 + parityShards
-		g := (r.o.shardSize * shards) / (cacheSize - (cacheSize >> 4))
-
-		if cpuid.CPU.ThreadsPerCore > 1 {
-			// If multiple threads per core, make sure they don't contend for cache.
-			g *= cpuid.CPU.ThreadsPerCore
-		}
-		g *= 2
-		if g < p {
-			g = p
-		}
-
-		// Have g be multiple of p
-		g += p - 1
-		g -= g % p
-
-		r.o.maxGoroutines = g
-	}
-
-	// Inverted matrices are cached in a tree keyed by the indices
-	// of the invalid rows of the data to reconstruct.
-	// The inversion root node will have the identity matrix as
-	// its inversion matrix because it implies there are no errors
-	// with the original data.
-	r.tree = newInversionTree(dataShards, parityShards)
-
-	r.parity = make([][]byte, parityShards)
-	for i := range r.parity {
-		r.parity[i] = r.m[dataShards+i]
-	}
-
-	return &r, err
-}
-
-// ErrTooFewShards is returned if too few shards were given to
-// Encode/Verify/Reconstruct/Update. It will also be returned from Reconstruct
-// if there were too few shards to reconstruct the missing data.
-var ErrTooFewShards = errors.New("too few shards given")
-
-// Encodes parity for a set of data shards.
-// Input is an array 'shards' containing data shards followed by parity shards.
-// The number of shards must match the number given to New.
-// Each shard is a byte array, and they must all be the same size.
-// The parity shards will always be overwritten and the data shards
-// will remain the same.
-func (r reedSolomon) Encode(shards [][]byte) error {
-	if len(shards) != r.Shards {
-		return ErrTooFewShards
-	}
-
-	err := checkShards(shards, false)
-	if err != nil {
-		return err
-	}
-
-	// Get the slice of output buffers.
-	output := shards[r.DataShards:]
-
-	// Do the coding.
-	r.codeSomeShards(r.parity, shards[0:r.DataShards], output, r.ParityShards, len(shards[0]))
-	return nil
-}
-
-// ErrInvalidInput is returned on invalid input parameters to Update.
-var ErrInvalidInput = errors.New("invalid input")
-
-func (r reedSolomon) Update(shards [][]byte, newDatashards [][]byte) error {
-	if len(shards) != r.Shards {
-		return ErrTooFewShards
-	}
-
-	if len(newDatashards) != r.DataShards {
-		return ErrTooFewShards
-	}
-
-	err := checkShards(shards, true)
-	if err != nil {
-		return err
-	}
-
-	err = checkShards(newDatashards, true)
-	if err != nil {
-		return err
-	}
-
-	for i := range newDatashards {
-		if newDatashards[i] != nil && shards[i] == nil {
-			return ErrInvalidInput
-		}
-	}
-	for _, p := range shards[r.DataShards:] {
-		if p == nil {
-			return ErrInvalidInput
-		}
-	}
-
-	shardSize := shardSize(shards)
-
-	// Get the slice of output buffers.
-	output := shards[r.DataShards:]
-
-	// Do the coding.
-	r.updateParityShards(r.parity, shards[0:r.DataShards], newDatashards[0:r.DataShards], output, r.ParityShards, shardSize)
-	return nil
-}
-
-func (r reedSolomon) updateParityShards(matrixRows, oldinputs, newinputs, outputs [][]byte, outputCount, byteCount int) {
-	if r.o.maxGoroutines > 1 && byteCount > r.o.minSplitSize {
-		r.updateParityShardsP(matrixRows, oldinputs, newinputs, outputs, outputCount, byteCount)
-		return
-	}
-
-	for c := 0; c < r.DataShards; c++ {
-		in := newinputs[c]
-		if in == nil {
-			continue
-		}
-		oldin := oldinputs[c]
-		// oldinputs data will be changed
-		sliceXor(in, oldin, r.o.useSSE2)
-		for iRow := 0; iRow < outputCount; iRow++ {
-			galMulSliceXor(matrixRows[iRow][c], oldin, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
-		}
-	}
-}
-
-func (r reedSolomon) updateParityShardsP(matrixRows, oldinputs, newinputs, outputs [][]byte, outputCount, byteCount int) {
-	var wg sync.WaitGroup
-	do := byteCount / r.o.maxGoroutines
-	if do < r.o.minSplitSize {
-		do = r.o.minSplitSize
-	}
-	start := 0
-	for start < byteCount {
-		if start+do > byteCount {
-			do = byteCount - start
-		}
-		wg.Add(1)
-		go func(start, stop int) {
-			for c := 0; c < r.DataShards; c++ {
-				in := newinputs[c]
-				if in == nil {
-					continue
-				}
-				oldin := oldinputs[c]
-				// oldinputs data will be changed
-				sliceXor(in[start:stop], oldin[start:stop], r.o.useSSE2)
-				for iRow := 0; iRow < outputCount; iRow++ {
-					galMulSliceXor(matrixRows[iRow][c], oldin[start:stop], outputs[iRow][start:stop], r.o.useSSSE3, r.o.useAVX2)
-				}
-			}
-			wg.Done()
-		}(start, start+do)
-		start += do
-	}
-	wg.Wait()
-}
-
-// Verify returns true if the parity shards contain the right data.
-// The data is the same format as Encode. No data is modified.
-func (r reedSolomon) Verify(shards [][]byte) (bool, error) {
-	if len(shards) != r.Shards {
-		return false, ErrTooFewShards
-	}
-	err := checkShards(shards, false)
-	if err != nil {
-		return false, err
-	}
-
-	// Slice of buffers being checked.
-	toCheck := shards[r.DataShards:]
-
-	// Do the checking.
-	return r.checkSomeShards(r.parity, shards[0:r.DataShards], toCheck, r.ParityShards, len(shards[0])), nil
-}
-
-// Multiplies a subset of rows from a coding matrix by a full set of
-// input shards to produce some output shards.
-// 'matrixRows' are the rows from the matrix to use.
-// 'inputs' is an array of byte arrays, each of which is one input shard.
-// The number of inputs used is determined by the length of each matrix row.
-// 'outputs' are the byte arrays where the computed shards are stored.
-// The number of outputs computed, and the number of matrix rows used,
-// is determined by outputCount, which is the number of outputs to compute.
-func (r reedSolomon) codeSomeShards(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
-	if r.o.maxGoroutines > 1 && byteCount > r.o.minSplitSize {
-		r.codeSomeShardsP(matrixRows, inputs, outputs, outputCount, byteCount)
-		return
-	}
-	for c := 0; c < r.DataShards; c++ {
-		in := inputs[c]
-		for iRow := 0; iRow < outputCount; iRow++ {
-			if c == 0 {
-				galMulSlice(matrixRows[iRow][c], in, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
-			} else {
-				galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
-			}
-		}
-	}
-}
-
-// Perform the same as codeSomeShards, but split the workload into
-// several goroutines.
-func (r reedSolomon) codeSomeShardsP(matrixRows, inputs, outputs [][]byte, outputCount, byteCount int) {
-	var wg sync.WaitGroup
-	do := byteCount / r.o.maxGoroutines
-	if do < r.o.minSplitSize {
-		do = r.o.minSplitSize
-	}
-	// Make sizes divisible by 16
-	do = (do + 15) & (^15)
-	start := 0
-	for start < byteCount {
-		if start+do > byteCount {
-			do = byteCount - start
-		}
-		wg.Add(1)
-		go func(start, stop int) {
-			for c := 0; c < r.DataShards; c++ {
-				in := inputs[c]
-				for iRow := 0; iRow < outputCount; iRow++ {
-					if c == 0 {
-						galMulSlice(matrixRows[iRow][c], in[start:stop], outputs[iRow][start:stop], r.o.useSSSE3, r.o.useAVX2)
-					} else {
-						galMulSliceXor(matrixRows[iRow][c], in[start:stop], outputs[iRow][start:stop], r.o.useSSSE3, r.o.useAVX2)
-					}
-				}
-			}
-			wg.Done()
-		}(start, start+do)
-		start += do
-	}
-	wg.Wait()
-}
-
-// checkSomeShards is mostly the same as codeSomeShards,
-// except this will check values and return
-// as soon as a difference is found.
-func (r reedSolomon) checkSomeShards(matrixRows, inputs, toCheck [][]byte, outputCount, byteCount int) bool {
-	if r.o.maxGoroutines > 1 && byteCount > r.o.minSplitSize {
-		return r.checkSomeShardsP(matrixRows, inputs, toCheck, outputCount, byteCount)
-	}
-	outputs := make([][]byte, len(toCheck))
-	for i := range outputs {
-		outputs[i] = make([]byte, byteCount)
-	}
-	for c := 0; c < r.DataShards; c++ {
-		in := inputs[c]
-		for iRow := 0; iRow < outputCount; iRow++ {
-			galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
-		}
-	}
-
-	for i, calc := range outputs {
-		if !bytes.Equal(calc, toCheck[i]) {
-			return false
-		}
-	}
-	return true
-}
-
-func (r reedSolomon) checkSomeShardsP(matrixRows, inputs, toCheck [][]byte, outputCount, byteCount int) bool {
-	same := true
-	var mu sync.RWMutex // For above
-
-	var wg sync.WaitGroup
-	do := byteCount / r.o.maxGoroutines
-	if do < r.o.minSplitSize {
-		do = r.o.minSplitSize
-	}
-	// Make sizes divisible by 16
-	do = (do + 15) & (^15)
-	start := 0
-	for start < byteCount {
-		if start+do > byteCount {
-			do = byteCount - start
-		}
-		wg.Add(1)
-		go func(start, do int) {
-			defer wg.Done()
-			outputs := make([][]byte, len(toCheck))
-			for i := range outputs {
-				outputs[i] = make([]byte, do)
-			}
-			for c := 0; c < r.DataShards; c++ {
-				mu.RLock()
-				if !same {
-					mu.RUnlock()
-					return
-				}
-				mu.RUnlock()
-				in := inputs[c][start : start+do]
-				for iRow := 0; iRow < outputCount; iRow++ {
-					galMulSliceXor(matrixRows[iRow][c], in, outputs[iRow], r.o.useSSSE3, r.o.useAVX2)
-				}
-			}
-
-			for i, calc := range outputs {
-				if !bytes.Equal(calc, toCheck[i][start:start+do]) {
-					mu.Lock()
-					same = false
-					mu.Unlock()
-					return
-				}
-			}
-		}(start, do)
-		start += do
-	}
-	wg.Wait()
-	return same
-}
-
-// ErrShardNoData will be returned if there are no shards,
-// or if the length of all shards is zero.
-var ErrShardNoData = errors.New("no shard data")
-
-// ErrShardSize is returned if shard length isn't the same for all
-// shards.
-var ErrShardSize = errors.New("shard sizes do not match")
-
-// checkShards will check if shards are the same size
-// or 0, if allowed. An error is returned if this fails.
-// An error is also returned if all shards are size 0.
-func checkShards(shards [][]byte, nilok bool) error {
-	size := shardSize(shards)
-	if size == 0 {
-		return ErrShardNoData
-	}
-	for _, shard := range shards {
-		if len(shard) != size {
-			if len(shard) != 0 || !nilok {
-				return ErrShardSize
-			}
-		}
-	}
-	return nil
-}
-
-// shardSize returns the size of a single shard.
-// The first non-zero size is returned,
-// or 0 if all shards are size 0.
-func shardSize(shards [][]byte) int {
-	for _, shard := range shards {
-		if len(shard) != 0 {
-			return len(shard)
-		}
-	}
-	return 0
-}
-
-// Reconstruct will recreate the missing shards, if possible.
-//
-// Given a list of shards, some of which contain data, fills in the
-// ones that don't have data.
-//
-// The length of the array must be equal to Shards.
-// You indicate that a shard is missing by setting it to nil or zero-length.
-// If a shard is zero-length but has sufficient capacity, that memory will
-// be used, otherwise a new []byte will be allocated.
-//
-// If there are too few shards to reconstruct the missing
-// ones, ErrTooFewShards will be returned.
-//
-// The reconstructed shard set is complete, but integrity is not verified.
-// Use the Verify function to check if the data set is ok.
-func (r reedSolomon) Reconstruct(shards [][]byte) error {
-	return r.reconstruct(shards, false)
-}
-
-// ReconstructData will recreate any missing data shards, if possible.
-//
-// Given a list of shards, some of which contain data, fills in the
-// data shards that don't have data.
-//
-// The length of the array must be equal to Shards.
-// You indicate that a shard is missing by setting it to nil or zero-length.
-// If a shard is zero-length but has sufficient capacity, that memory will
-// be used, otherwise a new []byte will be allocated.
-//
-// If there are too few shards to reconstruct the missing
-// ones, ErrTooFewShards will be returned.
-//
-// As the reconstructed shard set may contain missing parity shards,
-// calling the Verify function is likely to fail.
-func (r reedSolomon) ReconstructData(shards [][]byte) error {
-	return r.reconstruct(shards, true)
-}
-
-// reconstruct will recreate the missing data shards, and unless
-// dataOnly is true, also the missing parity shards
-//
-// The length of the array must be equal to Shards.
-// You indicate that a shard is missing by setting it to nil.
-//
-// If there are too few shards to reconstruct the missing
-// ones, ErrTooFewShards will be returned.
-func (r reedSolomon) reconstruct(shards [][]byte, dataOnly bool) error {
-	if len(shards) != r.Shards {
-		return ErrTooFewShards
-	}
-	// Check arguments.
-	err := checkShards(shards, true)
-	if err != nil {
-		return err
-	}
-
-	shardSize := shardSize(shards)
-
-	// Quick check: are all of the shards present?  If so, there's
-	// nothing to do.
-	numberPresent := 0
-	for i := 0; i < r.Shards; i++ {
-		if len(shards[i]) != 0 {
-			numberPresent++
-		}
-	}
-	if numberPresent == r.Shards {
-		// Cool.  All of the shards have data.  We don't
-		// need to do anything.
-		return nil
-	}
-
-	// More complete sanity check
-	if numberPresent < r.DataShards {
-		return ErrTooFewShards
-	}
-
-	// Pull out an array holding just the shards that
-	// correspond to the rows of the submatrix.  These shards
-	// will be the input to the decoding process that re-creates
-	// the missing data shards.
-	//
-	// Also, create an array of indices of the valid rows we do have
-	// and the invalid rows we don't have up until we have enough valid rows.
-	subShards := make([][]byte, r.DataShards)
-	validIndices := make([]int, r.DataShards)
-	invalidIndices := make([]int, 0)
-	subMatrixRow := 0
-	for matrixRow := 0; matrixRow < r.Shards && subMatrixRow < r.DataShards; matrixRow++ {
-		if len(shards[matrixRow]) != 0 {
-			subShards[subMatrixRow] = shards[matrixRow]
-			validIndices[subMatrixRow] = matrixRow
-			subMatrixRow++
-		} else {
-			invalidIndices = append(invalidIndices, matrixRow)
-		}
-	}
-
-	// Attempt to get the cached inverted matrix out of the tree
-	// based on the indices of the invalid rows.
-	dataDecodeMatrix := r.tree.GetInvertedMatrix(invalidIndices)
-
-	// If the inverted matrix isn't cached in the tree yet we must
-	// construct it ourselves and insert it into the tree for the
-	// future.  In this way the inversion tree is lazily loaded.
-	if dataDecodeMatrix == nil {
-		// Pull out the rows of the matrix that correspond to the
-		// shards that we have and build a square matrix.  This
-		// matrix could be used to generate the shards that we have
-		// from the original data.
-		subMatrix, _ := newMatrix(r.DataShards, r.DataShards)
-		for subMatrixRow, validIndex := range validIndices {
-			for c := 0; c < r.DataShards; c++ {
-				subMatrix[subMatrixRow][c] = r.m[validIndex][c]
-			}
-		}
-		// Invert the matrix, so we can go from the encoded shards
-		// back to the original data.  Then pull out the row that
-		// generates the shard that we want to decode.  Note that
-		// since this matrix maps back to the original data, it can
-		// be used to create a data shard, but not a parity shard.
-		dataDecodeMatrix, err = subMatrix.Invert()
-		if err != nil {
-			return err
-		}
-
-		// Cache the inverted matrix in the tree for future use keyed on the
-		// indices of the invalid rows.
-		err = r.tree.InsertInvertedMatrix(invalidIndices, dataDecodeMatrix, r.Shards)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Re-create any data shards that were missing.
-	//
-	// The input to the coding is all of the shards we actually
-	// have, and the output is the missing data shards.  The computation
-	// is done using the special decode matrix we just built.
-	outputs := make([][]byte, r.ParityShards)
-	matrixRows := make([][]byte, r.ParityShards)
-	outputCount := 0
-
-	for iShard := 0; iShard < r.DataShards; iShard++ {
-		if len(shards[iShard]) == 0 {
-			if cap(shards[iShard]) >= shardSize {
-				shards[iShard] = shards[iShard][0:shardSize]
-			} else {
-				shards[iShard] = make([]byte, shardSize)
-			}
-			outputs[outputCount] = shards[iShard]
-			matrixRows[outputCount] = dataDecodeMatrix[iShard]
-			outputCount++
-		}
-	}
-	r.codeSomeShards(matrixRows, subShards, outputs[:outputCount], outputCount, shardSize)
-
-	if dataOnly {
-		// Exit out early if we are only interested in the data shards
-		return nil
-	}
-
-	// Now that we have all of the data shards intact, we can
-	// compute any of the parity that is missing.
-	//
-	// The input to the coding is ALL of the data shards, including
-	// any that we just calculated.  The output is whichever of the
-	// parity shards were missing.
-	outputCount = 0
-	for iShard := r.DataShards; iShard < r.Shards; iShard++ {
-		if len(shards[iShard]) == 0 {
-			if cap(shards[iShard]) >= shardSize {
-				shards[iShard] = shards[iShard][0:shardSize]
-			} else {
-				shards[iShard] = make([]byte, shardSize)
-			}
-			outputs[outputCount] = shards[iShard]
-			matrixRows[outputCount] = r.parity[iShard-r.DataShards]
-			outputCount++
-		}
-	}
-	r.codeSomeShards(matrixRows, shards[:r.DataShards], outputs[:outputCount], outputCount, shardSize)
-	return nil
-}
-
-// ErrShortData will be returned by Split(), if there isn't enough data
-// to fill the number of shards.
-var ErrShortData = errors.New("not enough data to fill the number of requested shards")
-
-// Split a data slice into the number of shards given to the encoder,
-// and create empty parity shards if necessary.
-//
-// The data will be split into equally sized shards.
-// If the data size isn't divisible by the number of shards,
-// the last shard will contain extra zeros.
-//
-// There must be at least 1 byte otherwise ErrShortData will be
-// returned.
-//
-// The data will not be copied, except for the last shard, so you
-// should not modify the data of the input slice afterwards.
-func (r reedSolomon) Split(data []byte) ([][]byte, error) {
-	if len(data) == 0 {
-		return nil, ErrShortData
-	}
-	// Calculate number of bytes per data shard.
-	perShard := (len(data) + r.DataShards - 1) / r.DataShards
-
-	if cap(data) > len(data) {
-		data = data[:cap(data)]
-	}
-
-	// Only allocate memory if necessary
-	if len(data) < (r.Shards * perShard) {
-		// Pad data to r.Shards*perShard.
-		padding := make([]byte, (r.Shards*perShard)-len(data))
-		data = append(data, padding...)
-	}
-
-	// Split into equal-length shards.
-	dst := make([][]byte, r.Shards)
-	for i := range dst {
-		dst[i] = data[:perShard]
-		data = data[perShard:]
-	}
-
-	return dst, nil
-}
-
-// ErrReconstructRequired is returned if too few data shards are intact and a
-// reconstruction is required before you can successfully join the shards.
-var ErrReconstructRequired = errors.New("reconstruction required as one or more required data shards are nil")
-
-// Join the shards and write the data segment to dst.
-//
-// Only the data shards are considered.
-// You must supply the exact output size you want.
-//
-// If there are too few shards given, ErrTooFewShards will be returned.
-// If the total data size is less than outSize, ErrShortData will be returned.
-// If one or more required data shards are nil, ErrReconstructRequired will be returned.
-func (r reedSolomon) Join(dst io.Writer, shards [][]byte, outSize int) error {
-	// Do we have enough shards?
-	if len(shards) < r.DataShards {
-		return ErrTooFewShards
-	}
-	shards = shards[:r.DataShards]
-
-	// Do we have enough data?
-	size := 0
-	for _, shard := range shards {
-		if shard == nil {
-			return ErrReconstructRequired
-		}
-		size += len(shard)
-
-		// Do we have enough data already?
-		if size >= outSize {
-			break
-		}
-	}
-	if size < outSize {
-		return ErrShortData
-	}
-
-	// Copy data to dst
-	write := outSize
-	for _, shard := range shards {
-		if write < len(shard) {
-			_, err := dst.Write(shard[:write])
-			return err
-		}
-		n, err := dst.Write(shard)
-		if err != nil {
-			return err
-		}
-		write -= n
-	}
-	return nil
-}
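
Put together, the exported API is a short round trip: Split pads and slices the input, Encode fills the parity shards, Reconstruct rebuilds whatever was lost, and Verify checks the result. A sketch against the public API:

package main

import (
	"fmt"
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	enc, err := reedsolomon.New(4, 2) // tolerates any 2 lost shards
	if err != nil {
		log.Fatal(err)
	}

	// Split pads the input to divide evenly into 4 data shards and
	// appends 2 zeroed parity shards.
	shards, err := enc.Split([]byte("some data worth protecting"))
	if err != nil {
		log.Fatal(err)
	}
	if err := enc.Encode(shards); err != nil {
		log.Fatal(err)
	}

	// Lose one data shard and one parity shard, then rebuild both.
	shards[0], shards[5] = nil, nil
	if err := enc.Reconstruct(shards); err != nil {
		log.Fatal(err)
	}

	ok, err := enc.Verify(shards)
	fmt.Println(ok, err) // true <nil>
}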

+ 0 - 584
vendor/github.com/klauspost/reedsolomon/streaming.go

@@ -1,584 +0,0 @@
-/**
- * Reed-Solomon Coding over 8-bit values.
- *
- * Copyright 2015, Klaus Post
- * Copyright 2015, Backblaze, Inc.
- */
-
-package reedsolomon
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"io"
-	"sync"
-)
-
-// StreamEncoder is an interface to encode Reed-Solomon parity sets for your data.
-// It provides a fully streaming interface, and processes data in blocks of up to 4MB.
-//
-// For small shard sizes, 10MB and below, it is recommended to use the in-memory interface,
-// since the streaming interface has a start-up overhead.
-//
-// For all operations, readers and writers should not assume any order/size of
-// individual reads/writes.
-//
-// For usage examples, see "stream-encoder.go" and "stream-decoder.go" in the examples
-// folder.
-type StreamEncoder interface {
-	// Encodes parity shards for a set of data shards.
-	//
-	// Input is 'data' containing readers for the data shards, and 'parity'
-	// containing writers for the parity shards.
-	//
-	// The number of shards must match the number given to NewStream().
-	//
-	// Each reader must supply the same number of bytes.
-	//
-	// The parity shards will be written to the writer.
-	// The number of bytes written will match the input size.
-	//
-	// If a data stream returns an error, a StreamReadError type error
-	// will be returned. If a parity writer returns an error, a
-	// StreamWriteError will be returned.
-	Encode(data []io.Reader, parity []io.Writer) error
-
-	// Verify returns true if the parity shards contain correct data.
-	//
-	// The number of shards must match the total number of data+parity shards
-	// given to NewStream().
-	//
-	// Each reader must supply the same number of bytes.
-	// If a shard stream returns an error, a StreamReadError type error
-	// will be returned.
-	Verify(shards []io.Reader) (bool, error)
-
-	// Reconstruct will recreate the missing shards if possible.
-	//
-	// Given a list of valid shards (to read) and invalid shards (to write)
-	//
-	// You indicate that a shard is missing by setting it to nil in the 'valid'
-	// slice and at the same time setting a non-nil writer in "fill".
-	// An index cannot have both a non-nil 'valid' and a non-nil 'fill' entry.
-	// If both are provided 'ErrReconstructMismatch' is returned.
-	//
-	// If there are too few shards to reconstruct the missing
-	// ones, ErrTooFewShards will be returned.
-	//
-	// The reconstructed shard set is complete, but integrity is not verified.
-	// Use the Verify function to check if the data set is ok.
-	Reconstruct(valid []io.Reader, fill []io.Writer) error
-
-	// Split an input stream into the number of shards given to the encoder.
-	//
-	// The data will be split into equally sized shards.
-	// If the data size isn't divisible by the number of shards,
-	// the last shard will contain extra zeros.
-	//
-	// You must supply the total size of your input.
-	// 'ErrShortData' will be returned if it is unable to retrieve the
-	// number of bytes indicated.
-	Split(data io.Reader, dst []io.Writer, size int64) (err error)
-
-	// Join the shards and write the data segment to dst.
-	//
-	// Only the data shards are considered.
-	//
-	// You must supply the exact output size you want.
-	// If there are too few shards given, ErrTooFewShards will be returned.
-	// If the total data size is less than outSize, ErrShortData will be returned.
-	Join(dst io.Writer, shards []io.Reader, outSize int64) error
-}
-
-// StreamReadError is returned when a read error is encountered
-// that relates to a supplied stream.
-// This will allow you to find out which reader has failed.
-type StreamReadError struct {
-	Err    error // The error
-	Stream int   // The stream number on which the error occurred
-}
-
-// Error returns the error as a string
-func (s StreamReadError) Error() string {
-	return fmt.Sprintf("error reading stream %d: %s", s.Stream, s.Err)
-}
-
-// String returns the error as a string
-func (s StreamReadError) String() string {
-	return s.Error()
-}
-
-// StreamWriteError is returned when a write error is encountered
-// that relates to a supplied stream. This will allow you to
-// find out which reader has failed.
-type StreamWriteError struct {
-	Err    error // The error
-	Stream int   // The stream number on which the error occurred
-}
-
-// Error returns the error as a string
-func (s StreamWriteError) Error() string {
-	return fmt.Sprintf("error writing stream %d: %s", s.Stream, s.Err)
-}
-
-// String returns the error as a string
-func (s StreamWriteError) String() string {
-	return s.Error()
-}
-
-// rsStream contains a matrix for a specific
-// distribution of datashards and parity shards.
-// Construct it using NewStream().
-type rsStream struct {
-	r  *reedSolomon
-	bs int // Block size
-	// Shard reader
-	readShards func(dst [][]byte, in []io.Reader) error
-	// Shard writer
-	writeShards func(out []io.Writer, in [][]byte) error
-	creads      bool
-	cwrites     bool
-}
-
-// NewStream creates a new encoder and initializes it to
-// the number of data shards and parity shards that
-// you want to use. You can reuse this encoder.
-// Note that the maximum number of total shards is 256.
-func NewStream(dataShards, parityShards int, o ...Option) (StreamEncoder, error) {
-	enc, err := New(dataShards, parityShards, o...)
-	if err != nil {
-		return nil, err
-	}
-	rs := enc.(*reedSolomon)
-	r := rsStream{r: rs, bs: 4 << 20}
-	r.readShards = readShards
-	r.writeShards = writeShards
-	return &r, err
-}
-
-// NewStreamC creates a new encoder and initializes it to
-// the number of data shards and parity shards given.
-//
-// This functions as 'NewStream', but allows you to enable CONCURRENT reads and writes.
-func NewStreamC(dataShards, parityShards int, conReads, conWrites bool, o ...Option) (StreamEncoder, error) {
-	enc, err := New(dataShards, parityShards, o...)
-	if err != nil {
-		return nil, err
-	}
-	rs := enc.(*reedSolomon)
-	r := rsStream{r: rs, bs: 4 << 20}
-	r.readShards = readShards
-	r.writeShards = writeShards
-	if conReads {
-		r.readShards = cReadShards
-	}
-	if conWrites {
-		r.writeShards = cWriteShards
-	}
-	return &r, err
-}
-
-func createSlice(n, length int) [][]byte {
-	out := make([][]byte, n)
-	for i := range out {
-		out[i] = make([]byte, length)
-	}
-	return out
-}
-
-// Encodes parity shards for a set of data shards.
-//
-// Input is 'data' containing readers for the data shards, and 'parity'
-// containing writers for the parity shards.
-//
-// The number of shards must match the number given to NewStream().
-//
-// Each reader must supply the same number of bytes.
-//
-// The parity shards will be written to the writer.
-// The number of bytes written will match the input size.
-//
-// If a data stream returns an error, a StreamReadError type error
-// will be returned. If a parity writer returns an error, a
-// StreamWriteError will be returned.
-func (r rsStream) Encode(data []io.Reader, parity []io.Writer) error {
-	if len(data) != r.r.DataShards {
-		return ErrTooFewShards
-	}
-
-	if len(parity) != r.r.ParityShards {
-		return ErrTooFewShards
-	}
-
-	all := createSlice(r.r.Shards, r.bs)
-	in := all[:r.r.DataShards]
-	out := all[r.r.DataShards:]
-	read := 0
-
-	for {
-		err := r.readShards(in, data)
-		switch err {
-		case nil:
-		case io.EOF:
-			if read == 0 {
-				return ErrShardNoData
-			}
-			return nil
-		default:
-			return err
-		}
-		out = trimShards(out, shardSize(in))
-		read += shardSize(in)
-		err = r.r.Encode(all)
-		if err != nil {
-			return err
-		}
-		err = r.writeShards(parity, out)
-		if err != nil {
-			return err
-		}
-	}
-}
-
-// trimShards trims the shards so they are all the same size.
-func trimShards(in [][]byte, size int) [][]byte {
-	for i := range in {
-		if in[i] != nil {
-			in[i] = in[i][0:size]
-		}
-		if len(in[i]) < size {
-			in[i] = nil
-		}
-	}
-	return in
-}
-
-func readShards(dst [][]byte, in []io.Reader) error {
-	if len(in) != len(dst) {
-		panic("internal error: in and dst size do not match")
-	}
-	size := -1
-	for i := range in {
-		if in[i] == nil {
-			dst[i] = nil
-			continue
-		}
-		n, err := io.ReadFull(in[i], dst[i])
-		// The error is EOF only if no bytes were read.
-		// If an EOF happens after reading some but not all the bytes,
-		// ReadFull returns ErrUnexpectedEOF.
-		switch err {
-		case io.ErrUnexpectedEOF, io.EOF:
-			if size < 0 {
-				size = n
-			} else if n != size {
-				// Shard sizes must match.
-				return ErrShardSize
-			}
-			dst[i] = dst[i][0:n]
-		case nil:
-			continue
-		default:
-			return StreamReadError{Err: err, Stream: i}
-		}
-	}
-	if size == 0 {
-		return io.EOF
-	}
-	return nil
-}
-
-func writeShards(out []io.Writer, in [][]byte) error {
-	if len(out) != len(in) {
-		panic("internal error: in and out size do not match")
-	}
-	for i := range in {
-		if out[i] == nil {
-			continue
-		}
-		n, err := out[i].Write(in[i])
-		if err != nil {
-			return StreamWriteError{Err: err, Stream: i}
-		}
-		// Check that the full shard was written.
-		if n != len(in[i]) {
-			return StreamWriteError{Err: io.ErrShortWrite, Stream: i}
-		}
-	}
-	return nil
-}
-
-type readResult struct {
-	n    int
-	size int
-	err  error
-}
-
-// cReadShards reads shards concurrently
-func cReadShards(dst [][]byte, in []io.Reader) error {
-	if len(in) != len(dst) {
-		panic("internal error: in and dst size do not match")
-	}
-	var wg sync.WaitGroup
-	wg.Add(len(in))
-	res := make(chan readResult, len(in))
-	for i := range in {
-		if in[i] == nil {
-			dst[i] = nil
-			wg.Done()
-			continue
-		}
-		go func(i int) {
-			defer wg.Done()
-			n, err := io.ReadFull(in[i], dst[i])
-			// The error is EOF only if no bytes were read.
-			// If an EOF happens after reading some but not all the bytes,
-			// ReadFull returns ErrUnexpectedEOF.
-			res <- readResult{size: n, err: err, n: i}
-
-		}(i)
-	}
-	wg.Wait()
-	close(res)
-	size := -1
-	for r := range res {
-		switch r.err {
-		case io.ErrUnexpectedEOF, io.EOF:
-			if size < 0 {
-				size = r.size
-			} else if r.size != size {
-				// Shard sizes must match.
-				return ErrShardSize
-			}
-			dst[r.n] = dst[r.n][0:r.size]
-		case nil:
-		default:
-			return StreamReadError{Err: r.err, Stream: r.n}
-		}
-	}
-	if size == 0 {
-		return io.EOF
-	}
-	return nil
-}
-
-// cWriteShards writes shards concurrently
-func cWriteShards(out []io.Writer, in [][]byte) error {
-	if len(out) != len(in) {
-		panic("internal error: in and out size do not match")
-	}
-	var errs = make(chan error, len(out))
-	var wg sync.WaitGroup
-	wg.Add(len(out))
-	for i := range in {
-		go func(i int) {
-			defer wg.Done()
-			if out[i] == nil {
-				errs <- nil
-				return
-			}
-			n, err := out[i].Write(in[i])
-			if err != nil {
-				errs <- StreamWriteError{Err: err, Stream: i}
-				return
-			}
-			if n != len(in[i]) {
-				errs <- StreamWriteError{Err: io.ErrShortWrite, Stream: i}
-			}
-		}(i)
-	}
-	wg.Wait()
-	close(errs)
-	for err := range errs {
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// Verify returns true if the parity shards contain correct data.
-//
-	// The number of shards must match the total number of data+parity shards
-// given to NewStream().
-//
-// Each reader must supply the same number of bytes.
-// If a shard stream returns an error, a StreamReadError type error
-// will be returned.
-func (r rsStream) Verify(shards []io.Reader) (bool, error) {
-	if len(shards) != r.r.Shards {
-		return false, ErrTooFewShards
-	}
-
-	read := 0
-	all := createSlice(r.r.Shards, r.bs)
-	for {
-		err := r.readShards(all, shards)
-		if err == io.EOF {
-			if read == 0 {
-				return false, ErrShardNoData
-			}
-			return true, nil
-		}
-		if err != nil {
-			return false, err
-		}
-		read += shardSize(all)
-		ok, err := r.r.Verify(all)
-		if !ok || err != nil {
-			return ok, err
-		}
-	}
-}
-
-// ErrReconstructMismatch is returned by the StreamEncoder, if you supply
-// "valid" and "fill" streams on the same index.
-// Therefore it is impossible to tell whether you consider the shard valid
-// or want it reconstructed.
-var ErrReconstructMismatch = errors.New("valid shards and fill shards are mutually exclusive")
-
-// Reconstruct will recreate the missing shards if possible.
-//
-// Given a list of valid shards (to read) and invalid shards (to write)
-//
-// You indicate that a shard is missing by setting it to nil in the 'valid'
-// slice and at the same time setting a non-nil writer in "fill".
-// An index cannot have both a non-nil 'valid' and a non-nil 'fill' entry.
-//
-// If there are too few shards to reconstruct the missing
-// ones, ErrTooFewShards will be returned.
-//
-// The reconstructed shard set is only complete if writers were supplied
-// for all missing shards. Its integrity is not automatically verified;
-// use the Verify function to check whether the data set is ok.
-func (r rsStream) Reconstruct(valid []io.Reader, fill []io.Writer) error {
-	if len(valid) != r.r.Shards {
-		return ErrTooFewShards
-	}
-	if len(fill) != r.r.Shards {
-		return ErrTooFewShards
-	}
-
-	all := createSlice(r.r.Shards, r.bs)
-	reconDataOnly := true
-	for i := range valid {
-		if valid[i] != nil && fill[i] != nil {
-			return ErrReconstructMismatch
-		}
-		if i >= r.r.DataShards && fill[i] != nil {
-			reconDataOnly = false
-		}
-	}
-
-	read := 0
-	for {
-		err := r.readShards(all, valid)
-		if err == io.EOF {
-			if read == 0 {
-				return ErrShardNoData
-			}
-			return nil
-		}
-		if err != nil {
-			return err
-		}
-		read += shardSize(all)
-		all = trimShards(all, shardSize(all))
-
-		if reconDataOnly {
-			err = r.r.ReconstructData(all) // just reconstruct missing data shards
-		} else {
-			err = r.r.Reconstruct(all) //  reconstruct all missing shards
-		}
-		if err != nil {
-			return err
-		}
-		err = r.writeShards(fill, all)
-		if err != nil {
-			return err
-		}
-	}
-}
-
-// Join the shards and write the data segment to dst.
-//
-// Only the data shards are considered.
-//
-// You must supply the exact output size you want.
-// If there are too few shards given, ErrTooFewShards will be returned.
-// If the total data size is less than outSize, ErrShortData will be returned.
-func (r rsStream) Join(dst io.Writer, shards []io.Reader, outSize int64) error {
-	// Do we have enough shards?
-	if len(shards) < r.r.DataShards {
-		return ErrTooFewShards
-	}
-
-	// Trim off parity shards if any
-	shards = shards[:r.r.DataShards]
-	for i := range shards {
-		if shards[i] == nil {
-			return StreamReadError{Err: ErrShardNoData, Stream: i}
-		}
-	}
-	// Join all shards
-	src := io.MultiReader(shards...)
-
-	// Copy data to dst
-	n, err := io.CopyN(dst, src, outSize)
-	if err == io.EOF {
-		return ErrShortData
-	}
-	if err != nil {
-		return err
-	}
-	if n != outSize {
-		return ErrShortData
-	}
-	return nil
-}
-
-// Split an input stream into the number of shards given to the encoder.
-//
-// The data will be split into equally sized shards.
-// If the data size isn't divisible by the number of shards,
-// the last shard will contain extra zeros.
-//
-// You must supply the total size of your input.
-// 'ErrShortData' will be returned if it is unable to retrieve the
-// number of bytes indicated.
-func (r rsStream) Split(data io.Reader, dst []io.Writer, size int64) error {
-	if size == 0 {
-		return ErrShortData
-	}
-	if len(dst) != r.r.DataShards {
-		return ErrInvShardNum
-	}
-
-	for i := range dst {
-		if dst[i] == nil {
-			return StreamWriteError{Err: ErrShardNoData, Stream: i}
-		}
-	}
-
-	// Calculate number of bytes per shard.
-	perShard := (size + int64(r.r.DataShards) - 1) / int64(r.r.DataShards)
-
-	// Pad data to r.Shards*perShard.
-	padding := make([]byte, (int64(r.r.Shards)*perShard)-size)
-	data = io.MultiReader(data, bytes.NewBuffer(padding))
-
-	// Split into equal-length shards and copy.
-	for i := range dst {
-		n, err := io.CopyN(dst[i], data, perShard)
-		if err != io.EOF && err != nil {
-			return err
-		}
-		if n != perShard {
-			return ErrShortData
-		}
-	}
-
-	return nil
-}
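
To see Split and Join working together, a round-trip sketch over in-memory buffers; the payload and shard counts are arbitrary:

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/klauspost/reedsolomon"
)

func main() {
	enc, err := reedsolomon.NewStream(3, 1)
	if err != nil {
		log.Fatal(err)
	}
	payload := []byte("hello, streaming reed-solomon")

	// Split into the three data shards, held in memory here.
	bufs := make([]*bytes.Buffer, 3)
	dst := make([]io.Writer, 3)
	for i := range bufs {
		bufs[i] = &bytes.Buffer{}
		dst[i] = bufs[i]
	}
	if err := enc.Split(bytes.NewReader(payload), dst, int64(len(payload))); err != nil {
		log.Fatal(err)
	}

	// Join back; passing the exact output size trims the zero padding again.
	shards := make([]io.Reader, 3)
	for i := range bufs {
		shards[i] = bufs[i]
	}
	var out bytes.Buffer
	if err := enc.Join(&out, shards, int64(len(payload))); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.String()) // prints the original payload
}
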

+ 0 - 202
vendor/github.com/minio/minio-go/LICENSE

@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.

+ 0 - 629
vendor/github.com/minio/minio-go/api-compose-object.go

@@ -1,629 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"context"
-	"encoding/base64"
-	"fmt"
-	"net/http"
-	"net/url"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// SSEInfo - represents Server-Side-Encryption parameters specified by
-// a user.
-type SSEInfo struct {
-	key  []byte
-	algo string
-}
-
-// NewSSEInfo - constructs an SSEInfo from a (raw, un-encoded) encryption key
-// and an algorithm name. If algo is empty, it defaults to "AES256". Ref:
-// https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
-func NewSSEInfo(key []byte, algo string) SSEInfo {
-	if algo == "" {
-		algo = "AES256"
-	}
-	return SSEInfo{key, algo}
-}
-
-// internal method that computes SSE-C headers
-func (s *SSEInfo) getSSEHeaders(isCopySource bool) map[string]string {
-	if s == nil {
-		return nil
-	}
-
-	cs := ""
-	if isCopySource {
-		cs = "copy-source-"
-	}
-	return map[string]string{
-		"x-amz-" + cs + "server-side-encryption-customer-algorithm": s.algo,
-		"x-amz-" + cs + "server-side-encryption-customer-key":       base64.StdEncoding.EncodeToString(s.key),
-		"x-amz-" + cs + "server-side-encryption-customer-key-MD5":   sumMD5Base64(s.key),
-	}
-}
-
-// GetSSEHeaders - computes and returns headers for SSE-C as key-value
-// pairs. They can be set as metadata in PutObject* requests (for
-// encryption) or be set as request headers in `Core.GetObject` (for
-// decryption).
-func (s *SSEInfo) GetSSEHeaders() map[string]string {
-	return s.getSSEHeaders(false)
-}
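
For illustration, a short sketch that prints the three SSE-C headers computed above; the all-zero key is a placeholder, not real key material:

package main

import (
	"fmt"

	minio "github.com/minio/minio-go"
)

func main() {
	key := make([]byte, 32)          // placeholder 32-byte key
	sse := minio.NewSSEInfo(key, "") // empty algo defaults to "AES256"
	for k, v := range sse.GetSSEHeaders() {
		fmt.Printf("%s: %s\n", k, v)
	}
}
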
-
-// DestinationInfo - type with information about the object to be
-// created via server-side copy requests, using the Compose API.
-type DestinationInfo struct {
-	bucket, object string
-
-	// key for encrypting destination
-	encryption *SSEInfo
-
-	// if no user-metadata is provided, it is copied from the source
-	// (when there is only one source object in the compose
-	// request)
-	userMetadata map[string]string
-}
-
-// NewDestinationInfo - creates a compose-object/copy-source
-// destination info object.
-//
-// `encryptSSEC` is the key info for server-side-encryption with customer
-// provided key. If it is nil, no encryption is performed.
-//
-// `userMeta` is the user-metadata key-value pairs to be set on the
-// destination. The keys are automatically prefixed with `x-amz-meta-`
-// if needed. If nil is passed, and if only a single source (of any
-// size) is provided in the ComposeObject call, then metadata from the
-// source is copied to the destination.
-func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo,
-	userMeta map[string]string) (d DestinationInfo, err error) {
-
-	// Input validation.
-	if err = s3utils.CheckValidBucketName(bucket); err != nil {
-		return d, err
-	}
-	if err = s3utils.CheckValidObjectName(object); err != nil {
-		return d, err
-	}
-
-	// Process custom-metadata to remove a `x-amz-meta-` prefix if
-	// present and validate that keys are distinct (after this
-	// prefix removal).
-	m := make(map[string]string)
-	for k, v := range userMeta {
-		if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
-			k = k[len("x-amz-meta-"):]
-		}
-		if _, ok := m[k]; ok {
-			return d, ErrInvalidArgument(fmt.Sprintf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k))
-		}
-		m[k] = v
-	}
-
-	return DestinationInfo{
-		bucket:       bucket,
-		object:       object,
-		encryption:   encryptSSEC,
-		userMetadata: m,
-	}, nil
-}
-
-// getUserMetaHeadersMap - constructs the key-value pairs from the metadata
-// map to send as headers in a copy-object request. For a single-part
-// copy-object (i.e. a non-multipart object), enable
-// withCopyDirectiveHeader to set `x-amz-metadata-directive` to
-// `REPLACE`, so that metadata headers from the source are not copied
-// over.
-func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) map[string]string {
-	if len(d.userMetadata) == 0 {
-		return nil
-	}
-	r := make(map[string]string)
-	if withCopyDirectiveHeader {
-		r["x-amz-metadata-directive"] = "REPLACE"
-	}
-	for k, v := range d.userMetadata {
-		r["x-amz-meta-"+k] = v
-	}
-	return r
-}
-
-// SourceInfo - represents a source object to be copied, using
-// server-side copying APIs.
-type SourceInfo struct {
-	bucket, object string
-
-	start, end int64
-
-	decryptKey *SSEInfo
-	// Headers to send with the upload-part-copy request involving
-	// this source object.
-	Headers http.Header
-}
-
-// NewSourceInfo - create a compose-object/copy-object source info
-// object.
-//
-// `decryptSSEC` is the decryption key for server-side encryption
-// with a customer-provided key. It may be nil if the source is not
-// encrypted.
-func NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo {
-	r := SourceInfo{
-		bucket:     bucket,
-		object:     object,
-		start:      -1, // range is unspecified by default
-		decryptKey: decryptSSEC,
-		Headers:    make(http.Header),
-	}
-
-	// Set the source header
-	r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object))
-
-	// Assemble decryption headers for upload-part-copy request
-	for k, v := range decryptSSEC.getSSEHeaders(true) {
-		r.Headers.Set(k, v)
-	}
-
-	return r
-}
-
-// SetRange - Set the start and end offset of the source object to be
-// copied. If this method is not called, the whole source object is
-// copied.
-func (s *SourceInfo) SetRange(start, end int64) error {
-	if start > end || start < 0 {
-		return ErrInvalidArgument("start must be non-negative, and start must be at most end.")
-	}
-	// Note that 0 <= start <= end
-	s.start, s.end = start, end
-	return nil
-}
-
-// SetMatchETagCond - Set ETag match condition. The object is copied
-// only if the etag of the source matches the value given here.
-func (s *SourceInfo) SetMatchETagCond(etag string) error {
-	if etag == "" {
-		return ErrInvalidArgument("ETag cannot be empty.")
-	}
-	s.Headers.Set("x-amz-copy-source-if-match", etag)
-	return nil
-}
-
-// SetMatchETagExceptCond - Set the ETag match exception
-// condition. The object is copied only if the etag of the source is
-// not the value given here.
-func (s *SourceInfo) SetMatchETagExceptCond(etag string) error {
-	if etag == "" {
-		return ErrInvalidArgument("ETag cannot be empty.")
-	}
-	s.Headers.Set("x-amz-copy-source-if-none-match", etag)
-	return nil
-}
-
-// SetModifiedSinceCond - Set the modified since condition.
-func (s *SourceInfo) SetModifiedSinceCond(modTime time.Time) error {
-	if modTime.IsZero() {
-		return ErrInvalidArgument("Input time cannot be 0.")
-	}
-	s.Headers.Set("x-amz-copy-source-if-modified-since", modTime.Format(http.TimeFormat))
-	return nil
-}
-
-// SetUnmodifiedSinceCond - Set the unmodified since condition.
-func (s *SourceInfo) SetUnmodifiedSinceCond(modTime time.Time) error {
-	if modTime.IsZero() {
-		return ErrInvalidArgument("Input time cannot be 0.")
-	}
-	s.Headers.Set("x-amz-copy-source-if-unmodified-since", modTime.Format(http.TimeFormat))
-	return nil
-}
-
-// Helper to fetch the size, etag and user metadata of an object using a StatObject call.
-func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[string]string, err error) {
-	// Get object info - need size and etag here. Also, decryption
-	// headers are added to the stat request if given.
-	var objInfo ObjectInfo
-	opts := StatObjectOptions{}
-	for k, v := range s.decryptKey.getSSEHeaders(false) {
-		opts.Set(k, v)
-	}
-	objInfo, err = c.statObject(context.Background(), s.bucket, s.object, opts)
-	if err != nil {
-		err = ErrInvalidArgument(fmt.Sprintf("Could not stat object - %s/%s: %v", s.bucket, s.object, err))
-	} else {
-		size = objInfo.Size
-		etag = objInfo.ETag
-		userMeta = make(map[string]string)
-		for k, v := range objInfo.Metadata {
-			if strings.HasPrefix(k, "x-amz-meta-") {
-				if len(v) > 0 {
-					userMeta[k] = v[0]
-				}
-			}
-		}
-	}
-	return
-}
-
-// Low-level implementation of the CopyObject API; supports only up to 5 GiB per copy.
-func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
-	metadata map[string]string) (ObjectInfo, error) {
-
-	// Build headers.
-	headers := make(http.Header)
-
-	// Set all the metadata headers.
-	for k, v := range metadata {
-		headers.Set(k, v)
-	}
-
-	// Set the source header
-	headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
-
-	// Send copy-object request
-	resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
-		bucketName:   destBucket,
-		objectName:   destObject,
-		customHeader: headers,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return ObjectInfo{}, err
-	}
-
-	// Check if we got an error response.
-	if resp.StatusCode != http.StatusOK {
-		return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject)
-	}
-
-	cpObjRes := copyObjectResult{}
-	err = xmlDecoder(resp.Body, &cpObjRes)
-	if err != nil {
-		return ObjectInfo{}, err
-	}
-
-	objInfo := ObjectInfo{
-		Key:          destObject,
-		ETag:         strings.Trim(cpObjRes.ETag, "\""),
-		LastModified: cpObjRes.LastModified,
-	}
-	return objInfo, nil
-}
-
-func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
-	partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) {
-
-	headers := make(http.Header)
-
-	// Set source
-	headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
-
-	if startOffset < 0 {
-		return p, ErrInvalidArgument("startOffset must be non-negative")
-	}
-
-	if length >= 0 {
-		headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
-	}
-
-	for k, v := range metadata {
-		headers.Set(k, v)
-	}
-
-	queryValues := make(url.Values)
-	queryValues.Set("partNumber", strconv.Itoa(partID))
-	queryValues.Set("uploadId", uploadID)
-
-	resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
-		bucketName:   destBucket,
-		objectName:   destObject,
-		customHeader: headers,
-		queryValues:  queryValues,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return
-	}
-
-	// Check if we got an error response.
-	if resp.StatusCode != http.StatusOK {
-		return p, httpRespToErrorResponse(resp, destBucket, destObject)
-	}
-
-	// Decode copy-part response on success.
-	cpObjRes := copyObjectResult{}
-	err = xmlDecoder(resp.Body, &cpObjRes)
-	if err != nil {
-		return p, err
-	}
-	p.PartNumber, p.ETag = partID, cpObjRes.ETag
-	return p, nil
-}
-
-// uploadPartCopy - helper function to create a part in a multipart
-// upload via an upload-part-copy request
-// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
-func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
-	headers http.Header) (p CompletePart, err error) {
-
-	// Build query parameters
-	urlValues := make(url.Values)
-	urlValues.Set("partNumber", strconv.Itoa(partNumber))
-	urlValues.Set("uploadId", uploadID)
-
-	// Send upload-part-copy request
-	resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
-		bucketName:   bucket,
-		objectName:   object,
-		customHeader: headers,
-		queryValues:  urlValues,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return p, err
-	}
-
-	// Check if we got an error response.
-	if resp.StatusCode != http.StatusOK {
-		return p, httpRespToErrorResponse(resp, bucket, object)
-	}
-
-	// Decode copy-part response on success.
-	cpObjRes := copyObjectResult{}
-	err = xmlDecoder(resp.Body, &cpObjRes)
-	if err != nil {
-		return p, err
-	}
-	p.PartNumber, p.ETag = partNumber, cpObjRes.ETag
-	return p, nil
-}
-
-// ComposeObject - creates an object using server-side copying of
-// existing objects. It takes a list of source objects (with optional
-// offsets) and concatenates them into a new object using only
-// server-side copying operations.
-func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
-	if len(srcs) < 1 || len(srcs) > maxPartsCount {
-		return ErrInvalidArgument("There must be as least one and up to 10000 source objects.")
-	}
-	ctx := context.Background()
-	srcSizes := make([]int64, len(srcs))
-	var totalSize, size, totalParts int64
-	var srcUserMeta map[string]string
-	var etag string
-	var err error
-	for i, src := range srcs {
-		size, etag, srcUserMeta, err = src.getProps(c)
-		if err != nil {
-			return err
-		}
-
-		// Error out if client-side encryption is used in this source object when
-		// more than one source object is given.
-		if len(srcs) > 1 && src.Headers.Get("x-amz-meta-x-amz-key") != "" {
-			return ErrInvalidArgument(
-				fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object))
-		}
-
-		// Since we did a HEAD to get size, we use the ETag
-		// value to make sure the object has not changed by
-		// the time we perform the copy. This is done only if
-		// the user has not set their own ETag match
-		// condition.
-		if src.Headers.Get("x-amz-copy-source-if-match") == "" {
-			src.SetMatchETagCond(etag)
-		}
-
-		// Check if a segment is specified, and if so, is the
-		// segment within object bounds?
-		if src.start != -1 {
-			// Since range is specified,
-			//    0 <= src.start <= src.end
-			// so only invalid case to check is:
-			if src.end >= size {
-				return ErrInvalidArgument(
-					fmt.Sprintf("SourceInfo %d has invalid segment-to-copy [%d, %d] (size is %d)",
-						i, src.start, src.end, size))
-			}
-			size = src.end - src.start + 1
-		}
-
-		// Only the last source may be less than `absMinPartSize`
-		if size < absMinPartSize && i < len(srcs)-1 {
-			return ErrInvalidArgument(
-				fmt.Sprintf("SourceInfo %d is too small (%d) and it is not the last part", i, size))
-		}
-
-		// Is data to copy too large?
-		totalSize += size
-		if totalSize > maxMultipartPutObjectSize {
-			return ErrInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
-		}
-
-		// record source size
-		srcSizes[i] = size
-
-		// calculate parts needed for current source
-		totalParts += partsRequired(size)
-		// Do we need more parts than we are allowed?
-		if totalParts > maxPartsCount {
-			return ErrInvalidArgument(fmt.Sprintf(
-				"Your proposed compose object requires more than %d parts", maxPartsCount))
-		}
-	}
-
-	// Single source object case (i.e. when only one source is
-	// involved, it is being copied wholly and at most 5GiB in
-	// size).
-	if totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize {
-		h := srcs[0].Headers
-		// Add destination encryption headers
-		for k, v := range dst.encryption.getSSEHeaders(false) {
-			h.Set(k, v)
-		}
-
-		// If no user metadata is specified (and so, the
-		// for-loop below is not entered), metadata from the
-		// source is copied to the destination (due to
-		// single-part copy-object PUT request behaviour).
-		for k, v := range dst.getUserMetaHeadersMap(true) {
-			h.Set(k, v)
-		}
-
-		// Send copy request
-		resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
-			bucketName:   dst.bucket,
-			objectName:   dst.object,
-			customHeader: h,
-		})
-		defer closeResponse(resp)
-		if err != nil {
-			return err
-		}
-		// Check if we got an error response.
-		if resp.StatusCode != http.StatusOK {
-			return httpRespToErrorResponse(resp, dst.bucket, dst.object)
-		}
-
-		// Return nil on success.
-		return nil
-	}
-
-	// Now, handle multipart-copy cases.
-
-	// 1. Initiate a new multipart upload.
-
-	// Set user-metadata on the destination object. If no
-	// user-metadata is specified and there is only one source,
-	// then (and only then) metadata from the source is copied.
-	userMeta := dst.getUserMetaHeadersMap(false)
-	metaMap := userMeta
-	if len(userMeta) == 0 && len(srcs) == 1 {
-		metaMap = srcUserMeta
-	}
-	metaHeaders := make(map[string]string)
-	for k, v := range metaMap {
-		metaHeaders[k] = v
-	}
-	uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{UserMetadata: metaHeaders})
-	if err != nil {
-		return err
-	}
-
-	// 2. Perform copy part uploads
-	objParts := []CompletePart{}
-	partIndex := 1
-	for i, src := range srcs {
-		h := src.Headers
-		// Add destination encryption headers
-		for k, v := range dst.encryption.getSSEHeaders(false) {
-			h.Set(k, v)
-		}
-
-		// calculate start/end indices of parts after
-		// splitting.
-		startIdx, endIdx := calculateEvenSplits(srcSizes[i], src)
-		for j, start := range startIdx {
-			end := endIdx[j]
-
-			// Add (or reset) source range header for
-			// upload part copy request.
-			h.Set("x-amz-copy-source-range",
-				fmt.Sprintf("bytes=%d-%d", start, end))
-
-			// make upload-part-copy request
-			complPart, err := c.uploadPartCopy(ctx, dst.bucket,
-				dst.object, uploadID, partIndex, h)
-			if err != nil {
-				return err
-			}
-			objParts = append(objParts, complPart)
-			partIndex++
-		}
-	}
-
-	// 3. Make final complete-multipart request.
-	_, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID,
-		completeMultipartUpload{Parts: objParts})
-	if err != nil {
-		return err
-	}
-	return nil
-}
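
A minimal sketch of the Compose API described above, concatenating two objects server-side; the endpoint, credentials, bucket and object names are all placeholders:

package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("s3.example.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	src1 := minio.NewSourceInfo("bucket", "part-a", nil) // nil: source is not SSE-C encrypted
	src2 := minio.NewSourceInfo("bucket", "part-b", nil)

	dst, err := minio.NewDestinationInfo("bucket", "joined", nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	if err := client.ComposeObject(dst, []minio.SourceInfo{src1, src2}); err != nil {
		log.Fatal(err)
	}
}
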
-
-// partsRequired is ceiling(size / copyPartSize)
-func partsRequired(size int64) int64 {
-	r := size / copyPartSize
-	if size%copyPartSize > 0 {
-		r++
-	}
-	return r
-}
-
-// calculateEvenSplits - computes splits for a source and returns
-// start and end index slices. Splits happen evenly to be sure that no
-// part is less than 5MiB, as that could fail the multipart request if
-// it is not the last part.
-func calculateEvenSplits(size int64, src SourceInfo) (startIndex, endIndex []int64) {
-	if size == 0 {
-		return
-	}
-
-	reqParts := partsRequired(size)
-	startIndex = make([]int64, reqParts)
-	endIndex = make([]int64, reqParts)
-	// Compute number of required parts `k`, as:
-	//
-	// k = ceiling(size / copyPartSize)
-	//
-	// Now, distribute the `size` bytes in the source into
-	// k parts as evenly as possible:
-	//
-	// r parts sized (q+1) bytes, and
-	// (k - r) parts sized q bytes, where
-	//
-	// size = q * k + r (by simple division of size by k,
-	// so that 0 <= r < k)
-	//
-	start := src.start
-	if start == -1 {
-		start = 0
-	}
-	quot, rem := size/reqParts, size%reqParts
-	nextStart := start
-	for j := int64(0); j < reqParts; j++ {
-		curPartSize := quot
-		if j < rem {
-			curPartSize++
-		}
-
-		cStart := nextStart
-		cEnd := cStart + curPartSize - 1
-		nextStart = cEnd + 1
-
-		startIndex[j], endIndex[j] = cStart, cEnd
-	}
-	return
-}
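
As a worked instance of the arithmetic above (using a hypothetical part limit of 10 bytes in place of the real copyPartSize): for size = 23, partsRequired gives k = ceiling(23/10) = 3; then q = 23/3 = 7 and r = 23 mod 3 = 2, so r = 2 parts get q+1 = 8 bytes and k-r = 1 part gets 7 bytes, i.e. the byte ranges [0,7], [8,15] and [16,22].
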

+ 0 - 84
vendor/github.com/minio/minio-go/api-datatypes.go

@@ -1,84 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"net/http"
-	"time"
-)
-
-// BucketInfo container for bucket metadata.
-type BucketInfo struct {
-	// The name of the bucket.
-	Name string `json:"name"`
-	// Date the bucket was created.
-	CreationDate time.Time `json:"creationDate"`
-}
-
-// ObjectInfo container for object metadata.
-type ObjectInfo struct {
-	// An ETag is optionally set to md5sum of an object.  In case of multipart objects,
-	// ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of
-	// each parts concatenated into one string.
-	ETag string `json:"etag"`
-
-	Key          string    `json:"name"`         // Name of the object
-	LastModified time.Time `json:"lastModified"` // Date and time the object was last modified.
-	Size         int64     `json:"size"`         // Size in bytes of the object.
-	ContentType  string    `json:"contentType"`  // A standard MIME type describing the format of the object data.
-
-	// Collection of additional metadata on the object.
-	// eg: x-amz-meta-*, content-encoding etc.
-	Metadata http.Header `json:"metadata" xml:"-"`
-
-	// Owner name.
-	Owner struct {
-		DisplayName string `json:"name"`
-		ID          string `json:"id"`
-	} `json:"owner"`
-
-	// The class of storage used to store the object.
-	StorageClass string `json:"storageClass"`
-
-	// Error
-	Err error `json:"-"`
-}
-
-// ObjectMultipartInfo container for multipart object metadata.
-type ObjectMultipartInfo struct {
-	// Date and time at which the multipart upload was initiated.
-	Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"`
-
-	Initiator initiator
-	Owner     owner
-
-	// The type of storage to use for the object. Defaults to 'STANDARD'.
-	StorageClass string
-
-	// Key of the object for which the multipart upload was initiated.
-	Key string
-
-	// Size in bytes of the object.
-	Size int64
-
-	// Upload ID that identifies the multipart upload.
-	UploadID string `xml:"UploadId"`
-
-	// Error
-	Err error
-}
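
To show these ObjectInfo fields in use, a sketch that lists a bucket, assuming the channel-based ListObjects call from the vendored api-list.go (not shown in this hunk); endpoint, credentials and bucket name are placeholders:

package main

import (
	"fmt"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("s3.example.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}
	doneCh := make(chan struct{})
	defer close(doneCh)
	for info := range client.ListObjects("bucket", "", true, doneCh) {
		if info.Err != nil {
			log.Fatal(info.Err)
		}
		fmt.Printf("%s\t%d bytes\tetag=%s\n", info.Key, info.Size, info.ETag)
	}
}
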

+ 0 - 286
vendor/github.com/minio/minio-go/api-error-response.go

@@ -1,286 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"encoding/xml"
-	"fmt"
-	"net/http"
-)
-
-/* **** SAMPLE ERROR RESPONSE ****
-<?xml version="1.0" encoding="UTF-8"?>
-<Error>
-   <Code>AccessDenied</Code>
-   <Message>Access Denied</Message>
-   <BucketName>bucketName</BucketName>
-   <Key>objectName</Key>
-   <RequestId>F19772218238A85A</RequestId>
-   <HostId>GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD</HostId>
-</Error>
-*/
-
-// ErrorResponse - Is the typed error returned by all API operations.
-type ErrorResponse struct {
-	XMLName    xml.Name `xml:"Error" json:"-"`
-	Code       string
-	Message    string
-	BucketName string
-	Key        string
-	RequestID  string `xml:"RequestId"`
-	HostID     string `xml:"HostId"`
-
-	// Region where the bucket is located. This header is returned
-	// only in HEAD bucket and ListObjects response.
-	Region string
-
-	// Underlying HTTP status code for the returned error
-	StatusCode int `xml:"-" json:"-"`
-
-	// Headers of the returned S3 XML error
-	Headers http.Header `xml:"-" json:"-"`
-}
-
-// ToErrorResponse - Returns parsed ErrorResponse struct from body and
-// http headers.
-//
-// For example:
-//
-//   import s3 "github.com/minio/minio-go"
-//   ...
-//   ...
-//   reader, stat, err := s3.GetObject(...)
-//   if err != nil {
-//      resp := s3.ToErrorResponse(err)
-//   }
-//   ...
-func ToErrorResponse(err error) ErrorResponse {
-	switch err := err.(type) {
-	case ErrorResponse:
-		return err
-	default:
-		return ErrorResponse{}
-	}
-}
-
-// Error - Returns S3 error string.
-func (e ErrorResponse) Error() string {
-	if e.Message == "" {
-		msg, ok := s3ErrorResponseMap[e.Code]
-		if !ok {
-			msg = fmt.Sprintf("Error response code %s.", e.Code)
-		}
-		return msg
-	}
-	return e.Message
-}
-
-// Common string for errors to report issue location in unexpected
-// cases.
-const (
-	reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues."
-)
-
-// httpRespToErrorResponse returns a new encoded ErrorResponse
-// structure as error.
-func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error {
-	if resp == nil {
-		msg := "Response is empty. " + reportIssue
-		return ErrInvalidArgument(msg)
-	}
-
-	errResp := ErrorResponse{
-		StatusCode: resp.StatusCode,
-	}
-
-	err := xmlDecoder(resp.Body, &errResp)
-	// If XML decoding failed (e.g. empty body), fall back to the HTTP status code.
-	if err != nil {
-		switch resp.StatusCode {
-		case http.StatusNotFound:
-			if objectName == "" {
-				errResp = ErrorResponse{
-					StatusCode: resp.StatusCode,
-					Code:       "NoSuchBucket",
-					Message:    "The specified bucket does not exist.",
-					BucketName: bucketName,
-				}
-			} else {
-				errResp = ErrorResponse{
-					StatusCode: resp.StatusCode,
-					Code:       "NoSuchKey",
-					Message:    "The specified key does not exist.",
-					BucketName: bucketName,
-					Key:        objectName,
-				}
-			}
-		case http.StatusForbidden:
-			errResp = ErrorResponse{
-				StatusCode: resp.StatusCode,
-				Code:       "AccessDenied",
-				Message:    "Access Denied.",
-				BucketName: bucketName,
-				Key:        objectName,
-			}
-		case http.StatusConflict:
-			errResp = ErrorResponse{
-				StatusCode: resp.StatusCode,
-				Code:       "Conflict",
-				Message:    "Bucket not empty.",
-				BucketName: bucketName,
-			}
-		case http.StatusPreconditionFailed:
-			errResp = ErrorResponse{
-				StatusCode: resp.StatusCode,
-				Code:       "PreconditionFailed",
-				Message:    s3ErrorResponseMap["PreconditionFailed"],
-				BucketName: bucketName,
-				Key:        objectName,
-			}
-		default:
-			errResp = ErrorResponse{
-				StatusCode: resp.StatusCode,
-				Code:       resp.Status,
-				Message:    resp.Status,
-				BucketName: bucketName,
-			}
-		}
-	}
-
-	// Save hostID, requestID and region information
-	// from headers if not available through error XML.
-	if errResp.RequestID == "" {
-		errResp.RequestID = resp.Header.Get("x-amz-request-id")
-	}
-	if errResp.HostID == "" {
-		errResp.HostID = resp.Header.Get("x-amz-id-2")
-	}
-	if errResp.Region == "" {
-		errResp.Region = resp.Header.Get("x-amz-bucket-region")
-	}
-	if errResp.Code == "InvalidRegion" && errResp.Region != "" {
-		errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region)
-	}
-
-	// Save headers returned in the API XML error
-	errResp.Headers = resp.Header
-
-	return errResp
-}
-
-// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
-func ErrTransferAccelerationBucket(bucketName string) error {
-	return ErrorResponse{
-		StatusCode: http.StatusBadRequest,
-		Code:       "InvalidArgument",
-		Message:    "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.",
-		BucketName: bucketName,
-	}
-}
-
-// ErrEntityTooLarge - Input size is larger than supported maximum.
-func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
-	msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
-	return ErrorResponse{
-		StatusCode: http.StatusBadRequest,
-		Code:       "EntityTooLarge",
-		Message:    msg,
-		BucketName: bucketName,
-		Key:        objectName,
-	}
-}
-
-// ErrEntityTooSmall - Input size is smaller than supported minimum.
-func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
-	msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize)
-	return ErrorResponse{
-		StatusCode: http.StatusBadRequest,
-		Code:       "EntityTooSmall",
-		Message:    msg,
-		BucketName: bucketName,
-		Key:        objectName,
-	}
-}
-
-// ErrUnexpectedEOF - Unexpected end of file reached.
-func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
-	msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize)
-	return ErrorResponse{
-		StatusCode: http.StatusBadRequest,
-		Code:       "UnexpectedEOF",
-		Message:    msg,
-		BucketName: bucketName,
-		Key:        objectName,
-	}
-}
-
-// ErrInvalidBucketName - Invalid bucket name response.
-func ErrInvalidBucketName(message string) error {
-	return ErrorResponse{
-		StatusCode: http.StatusBadRequest,
-		Code:       "InvalidBucketName",
-		Message:    message,
-		RequestID:  "minio",
-	}
-}
-
-// ErrInvalidObjectName - Invalid object name response.
-func ErrInvalidObjectName(message string) error {
-	return ErrorResponse{
-		StatusCode: http.StatusNotFound,
-		Code:       "NoSuchKey",
-		Message:    message,
-		RequestID:  "minio",
-	}
-}
-
-// ErrInvalidObjectPrefix - Invalid object prefix response is
-// similar to object name response.
-var ErrInvalidObjectPrefix = ErrInvalidObjectName
-
-// ErrInvalidArgument - Invalid argument response.
-func ErrInvalidArgument(message string) error {
-	return ErrorResponse{
-		StatusCode: http.StatusBadRequest,
-		Code:       "InvalidArgument",
-		Message:    message,
-		RequestID:  "minio",
-	}
-}
-
-// ErrNoSuchBucketPolicy - No Such Bucket Policy response
-// The specified bucket does not have a bucket policy.
-func ErrNoSuchBucketPolicy(message string) error {
-	return ErrorResponse{
-		StatusCode: http.StatusNotFound,
-		Code:       "NoSuchBucketPolicy",
-		Message:    message,
-		RequestID:  "minio",
-	}
-}
-
-// ErrAPINotSupported - API not supported response
-// The specified API call is not supported
-func ErrAPINotSupported(message string) error {
-	return ErrorResponse{
-		StatusCode: http.StatusNotImplemented,
-		Code:       "APINotSupported",
-		Message:    message,
-		RequestID:  "minio",
-	}
-}
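
Putting the typed error to use, a sketch that branches on the S3 error code via ToErrorResponse; client setup and names are placeholders as in the earlier sketches:

package main

import (
	"fmt"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("s3.example.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}
	_, err = client.StatObject("bucket", "missing-key", minio.StatObjectOptions{})
	if err != nil {
		resp := minio.ToErrorResponse(err)
		switch resp.Code {
		case "NoSuchKey":
			fmt.Println("object does not exist")
		case "AccessDenied":
			fmt.Println("access denied:", resp.Message)
		default:
			fmt.Printf("S3 error %s (HTTP %d): %s\n", resp.Code, resp.StatusCode, resp.Message)
		}
	}
}
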

+ 0 - 26
vendor/github.com/minio/minio-go/api-get-object-context.go

@@ -1,26 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import "context"
-
-// GetObjectWithContext - returns a seekable, readable object.
-// The options can be used to specify the GET request further.
-func (c Client) GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
-	return c.getObjectWithContext(ctx, bucketName, objectName, opts)
-}
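
A usage sketch for GetObjectWithContext with a request deadline; as before, the endpoint, credentials and names are placeholders:

package main

import (
	"context"
	"io"
	"log"
	"os"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("s3.example.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	obj, err := client.GetObjectWithContext(ctx, "bucket", "key", minio.GetObjectOptions{})
	if err != nil {
		log.Fatal(err)
	}
	defer obj.Close()
	if _, err := io.Copy(os.Stdout, obj); err != nil {
		log.Fatal(err)
	}
}
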

+ 0 - 136
vendor/github.com/minio/minio-go/api-get-object-file.go

@@ -1,136 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"io"
-	"os"
-	"path/filepath"
-
-	"github.com/minio/minio-go/pkg/encrypt"
-
-	"context"
-
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// FGetObjectWithContext - download contents of an object to a local file.
-// The options can be used to specify the GET request further.
-func (c Client) FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
-	return c.fGetObjectWithContext(ctx, bucketName, objectName, filePath, opts)
-}
-
-// FGetObject - download contents of an object to a local file.
-func (c Client) FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error {
-	return c.fGetObjectWithContext(context.Background(), bucketName, objectName, filePath, opts)
-}
-
-// FGetEncryptedObject - Decrypt and store an object at filePath.
-func (c Client) FGetEncryptedObject(bucketName, objectName, filePath string, materials encrypt.Materials) error {
-	if materials == nil {
-		return ErrInvalidArgument("Unable to recognize empty encryption properties")
-	}
-	return c.FGetObject(bucketName, objectName, filePath, GetObjectOptions{Materials: materials})
-}
-
-// fGetObjectWithContext - fgetObject wrapper function with context
-func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return err
-	}
-
-	// Verify if destination already exists.
-	st, err := os.Stat(filePath)
-	if err == nil {
-		// If the destination exists and is a directory.
-		if st.IsDir() {
-			return ErrInvalidArgument("fileName is a directory.")
-		}
-	}
-
-	// Proceed if the file does not exist; return all other errors.
-	if err != nil {
-		if !os.IsNotExist(err) {
-			return err
-		}
-	}
-
-	// Extract top level directory.
-	objectDir, _ := filepath.Split(filePath)
-	if objectDir != "" {
-		// Create any missing top level directories.
-		if err := os.MkdirAll(objectDir, 0700); err != nil {
-			return err
-		}
-	}
-
-	// Stat the object to learn its size and ETag.
-	objectStat, err := c.StatObject(bucketName, objectName, StatObjectOptions{opts})
-	if err != nil {
-		return err
-	}
-
-	// Write to a temporary part file ("<filePath><ETag>.part.minio") before saving.
-	filePartPath := filePath + objectStat.ETag + ".part.minio"
-
-	// If it exists, open it in append mode; if not, create it as a part file.
-	filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
-	if err != nil {
-		return err
-	}
-
-	// Issue Stat to get the current offset.
-	st, err = filePart.Stat()
-	if err != nil {
-		return err
-	}
-
-	// Initialize get object request headers to set the
-	// appropriate range offsets to read from.
-	if st.Size() > 0 {
-		opts.SetRange(st.Size(), 0)
-	}
-
-	// Seek to current position for incoming reader.
-	objectReader, objectStat, err := c.getObject(ctx, bucketName, objectName, opts)
-	if err != nil {
-		return err
-	}
-
-	// Write to the part file.
-	if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil {
-		return err
-	}
-
-	// Close the file before renaming; this is specifically needed for Windows users.
-	if err = filePart.Close(); err != nil {
-		return err
-	}
-
-	// Safely completed. Now commit by renaming to actual filename.
-	if err = os.Rename(filePartPath, filePath); err != nil {
-		return err
-	}
-
-	// Return.
-	return nil
-}
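
A corresponding sketch for FGetObjectWithContext, downloading to a local path under a deadline (placeholders as above); note that, per the code above, an interrupted download resumes from the ".part.minio" file on the next call:

package main

import (
	"context"
	"log"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("s3.example.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	if err := client.FGetObjectWithContext(ctx, "bucket", "key", "/tmp/key.bin", minio.GetObjectOptions{}); err != nil {
		log.Fatal(err)
	}
}
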

+ 0 - 676
vendor/github.com/minio/minio-go/api-get-object.go

@@ -1,676 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"io"
-	"net/http"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/minio/minio-go/pkg/encrypt"
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// GetEncryptedObject deciphers and streams data stored in the server after applying the specified encryption materials.
-// The returned stream should be closed by the caller.
-func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error) {
-	if encryptMaterials == nil {
-		return nil, ErrInvalidArgument("Unable to recognize empty encryption properties")
-	}
-
-	return c.GetObject(bucketName, objectName, GetObjectOptions{Materials: encryptMaterials})
-}
-
-// GetObject - returns a seekable, readable object.
-func (c Client) GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
-	return c.getObjectWithContext(context.Background(), bucketName, objectName, opts)
-}
-
-// GetObject wrapper function that accepts a request context
-func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return nil, err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return nil, err
-	}
-
-	var httpReader io.ReadCloser
-	var objectInfo ObjectInfo
-	var err error
-
-	// Create request channel.
-	reqCh := make(chan getRequest)
-	// Create response channel.
-	resCh := make(chan getResponse)
-	// Create done channel.
-	doneCh := make(chan struct{})
-
-	// This routine feeds partial object data as and when the caller reads.
-	go func() {
-		defer close(reqCh)
-		defer close(resCh)
-
-		// Used to verify if etag of object has changed since last read.
-		var etag string
-
-		// Loop through the incoming control messages and read data.
-		for {
-			select {
-			// When the done channel is closed exit our routine.
-			case <-doneCh:
-				// Close the http response body before returning.
-				// This ends the connection with the server.
-				if httpReader != nil {
-					httpReader.Close()
-				}
-				return
-
-			// Gather incoming request.
-			case req := <-reqCh:
-				// If this is the first request, we may not need to do a getObject request yet.
-				if req.isFirstReq {
-					// First request is a Read/ReadAt.
-					if req.isReadOp {
-						// Differentiate between wanting the whole object and just a range.
-						if req.isReadAt {
-							// If this is a ReadAt request only get the specified range.
-							// Range is set with respect to the offset and length of the buffer requested.
-							// Do not set objectInfo from the first readAt request because it will not get
-							// the whole object.
-							opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
-						} else if req.Offset > 0 {
-							opts.SetRange(req.Offset, 0)
-						}
-						httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts)
-						if err != nil {
-							resCh <- getResponse{Error: err}
-							return
-						}
-						etag = objectInfo.ETag
-						// Read at least len(req.Buffer) bytes; if we cannot,
-						// we have reached our EOF.
-						size, err := io.ReadFull(httpReader, req.Buffer)
-						if size > 0 && err == io.ErrUnexpectedEOF {
-							// If an EOF happens after reading some but not
-							// all of the bytes, ReadFull returns ErrUnexpectedEOF.
-							err = io.EOF
-						}
-						// Send back the first response.
-						resCh <- getResponse{
-							objectInfo: objectInfo,
-							Size:       int(size),
-							Error:      err,
-							didRead:    true,
-						}
-					} else {
-						// First request is a Stat or Seek call.
-						// Only need to run a StatObject until an actual Read or ReadAt request comes through.
-						objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts})
-						if err != nil {
-							resCh <- getResponse{
-								Error: err,
-							}
-							// Exit the go-routine.
-							return
-						}
-						etag = objectInfo.ETag
-						// Send back the first response.
-						resCh <- getResponse{
-							objectInfo: objectInfo,
-						}
-					}
-				} else if req.settingObjectInfo { // Request is just to get objectInfo.
-					if etag != "" {
-						opts.SetMatchETag(etag)
-					}
-					objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts})
-					if err != nil {
-						resCh <- getResponse{
-							Error: err,
-						}
-						// Exit the goroutine.
-						return
-					}
-					// Send back the objectInfo.
-					resCh <- getResponse{
-						objectInfo: objectInfo,
-					}
-				} else {
-					// An offset change fetches the object anew at that offset.
-					// The httpReader may not have been set by the first
-					// request (if it was a Stat or Seek), so check whether
-					// the object has been read yet and only initialize a new
-					// reader when one does not already exist.
-					// All readAt requests are new requests.
-					if req.DidOffsetChange || !req.beenRead {
-						if etag != "" {
-							opts.SetMatchETag(etag)
-						}
-						if httpReader != nil {
-							// Close previously opened http reader.
-							httpReader.Close()
-						}
-						// If this request is a readAt only get the specified range.
-						if req.isReadAt {
-							// Range is set with respect to the offset and length of the buffer requested.
-							opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
-						} else if req.Offset > 0 { // Range is set with respect to the offset.
-							opts.SetRange(req.Offset, 0)
-						}
-						httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts)
-						if err != nil {
-							resCh <- getResponse{
-								Error: err,
-							}
-							return
-						}
-					}
-
-					// Read at least len(req.Buffer) bytes; if we cannot,
-					// we have reached our EOF.
-					size, err := io.ReadFull(httpReader, req.Buffer)
-					if err == io.ErrUnexpectedEOF {
-						// If an EOF happens after reading some but not
-						// all of the bytes, ReadFull returns ErrUnexpectedEOF.
-						err = io.EOF
-					}
-					// Reply back how much was read.
-					resCh <- getResponse{
-						Size:       int(size),
-						Error:      err,
-						didRead:    true,
-						objectInfo: objectInfo,
-					}
-				}
-			}
-		}
-	}()
-
-	// Create a newObject through the information sent back by reqCh.
-	return newObject(reqCh, resCh, doneCh), nil
-}
-
-// get request message container to communicate with internal
-// go-routine.
-type getRequest struct {
-	Buffer            []byte
-	Offset            int64 // readAt offset.
-	DidOffsetChange   bool  // Tracks the offset changes for Seek requests.
-	beenRead          bool  // Tracks whether this object has been read before.
-	isReadAt          bool  // Determines if this request is for a specific range.
-	isReadOp          bool  // Determines if this request is a Read or Read/At request.
-	isFirstReq        bool  // Determines if this request is the first time an object is being accessed.
-	settingObjectInfo bool  // Determines if this request is to set the objectInfo of an object.
-}
-
-// get response message container to reply back for the request.
-type getResponse struct {
-	Size       int
-	Error      error
-	didRead    bool       // Lets subsequent calls know whether or not httpReader has been initiated.
-	objectInfo ObjectInfo // Used for the first request.
-}
-
-// Object represents an open object. It implements
-// Reader, ReaderAt, Seeker, Closer for an HTTP stream.
-type Object struct {
-	// Mutex.
-	mutex *sync.Mutex
-
-	// User allocated and defined.
-	reqCh      chan<- getRequest
-	resCh      <-chan getResponse
-	doneCh     chan<- struct{}
-	currOffset int64
-	objectInfo ObjectInfo
-
-	// Ask lower level to initiate data fetching based on currOffset
-	seekData bool
-
-	// Keeps track of whether Close has been called.
-	isClosed bool
-
-	// Keeps track of whether this is the first call.
-	isStarted bool
-
-	// Previous error saved for future calls.
-	prevErr error
-
-	// Keeps track of if this object has been read yet.
-	beenRead bool
-
-	// Keeps track of if objectInfo has been set yet.
-	objectInfoSet bool
-}
-
-// doGetRequest - sends a request on the object's reqCh and blocks on resCh.
-// Returns the size of the buffer read, whether anything was read, and any
-// error encountered. For the first request sent on the object it is also
-// responsible for sending back the objectInfo.
-func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
-	o.reqCh <- request
-	response := <-o.resCh
-
-	// Return any error to the top level.
-	if response.Error != nil {
-		return response, response.Error
-	}
-
-	// This was the first request.
-	if !o.isStarted {
-		// The object has been operated on.
-		o.isStarted = true
-	}
-	// Set the objectInfo if the request was not readAt
-	// and it hasn't been set before.
-	if !o.objectInfoSet && !request.isReadAt {
-		o.objectInfo = response.objectInfo
-		o.objectInfoSet = true
-	}
-	// Set beenRead only if it has not been set before.
-	if !o.beenRead {
-		o.beenRead = response.didRead
-	}
-	// Data is ready on the wire; no need to re-initiate the connection at the lower level.
-	o.seekData = false
-
-	return response, nil
-}
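
The getRequest/getResponse machinery above is a plain request/response exchange over channels, served by a single worker goroutine. A minimal, self-contained sketch of the same pattern (the names here are illustrative only, not minio-go API):

package main

import "fmt"

type request struct{ n int }
type response struct{ squared int }

func main() {
	reqCh := make(chan request)
	resCh := make(chan response)
	doneCh := make(chan struct{})

	// Worker goroutine: serves requests until doneCh is closed, mirroring
	// how the object's reader goroutine is structured above.
	go func() {
		for {
			select {
			case <-doneCh:
				return
			case req := <-reqCh:
				resCh <- response{squared: req.n * req.n}
			}
		}
	}()

	reqCh <- request{n: 7}
	fmt.Println((<-resCh).squared) // prints 49
	close(doneCh)                  // analogous to Object.Close above
}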
-
-// setOffset - handles the setting of offsets for
-// Read/ReadAt/Seek requests.
-func (o *Object) setOffset(bytesRead int64) error {
-	// Update the currentOffset.
-	o.currOffset += bytesRead
-
-	if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size {
-		return io.EOF
-	}
-	return nil
-}
-
-// Read reads up to len(b) bytes into b. It returns the number of
-// bytes read (0 <= n <= len(b)) and any error encountered. Returns
-// io.EOF upon end of file.
-func (o *Object) Read(b []byte) (n int, err error) {
-	if o == nil {
-		return 0, ErrInvalidArgument("Object is nil")
-	}
-
-	// Locking.
-	o.mutex.Lock()
-	defer o.mutex.Unlock()
-
-	// prevErr is previous error saved from previous operation.
-	if o.prevErr != nil || o.isClosed {
-		return 0, o.prevErr
-	}
-	// Create a new request.
-	readReq := getRequest{
-		isReadOp: true,
-		beenRead: o.beenRead,
-		Buffer:   b,
-	}
-
-	// Alert that this is the first request.
-	if !o.isStarted {
-		readReq.isFirstReq = true
-	}
-
-	// Ask to establish a new data fetch routine based on seekData flag
-	readReq.DidOffsetChange = o.seekData
-	readReq.Offset = o.currOffset
-
-	// Send and receive from the first request.
-	response, err := o.doGetRequest(readReq)
-	if err != nil && err != io.EOF {
-		// Save the error for future calls.
-		o.prevErr = err
-		return response.Size, err
-	}
-
-	// Bytes read.
-	bytesRead := int64(response.Size)
-
-	// Set the new offset.
-	oerr := o.setOffset(bytesRead)
-	if oerr != nil {
-		// Save the error for future calls.
-		o.prevErr = oerr
-		return response.Size, oerr
-	}
-
-	// Return the response.
-	return response.Size, err
-}
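
Since Object implements io.Reader, a caller can stream it directly. A minimal sketch, assuming the v6-era constructor minio.New; the endpoint and credentials are placeholders:

package main

import (
	"io"
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}
	obj, err := c.GetObject("mybucket", "myobject", minio.GetObjectOptions{})
	if err != nil {
		log.Fatal(err)
	}
	defer obj.Close()
	// Each Read on obj is served by the channel-based goroutine above.
	if _, err := io.Copy(os.Stdout, obj); err != nil {
		log.Fatal(err)
	}
}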
-
-// Stat returns the ObjectInfo structure describing Object.
-func (o *Object) Stat() (ObjectInfo, error) {
-	if o == nil {
-		return ObjectInfo{}, ErrInvalidArgument("Object is nil")
-	}
-	// Locking.
-	o.mutex.Lock()
-	defer o.mutex.Unlock()
-
-	if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
-		return ObjectInfo{}, o.prevErr
-	}
-
-	// This is the first request.
-	if !o.isStarted || !o.objectInfoSet {
-		statReq := getRequest{
-			isFirstReq:        !o.isStarted,
-			settingObjectInfo: !o.objectInfoSet,
-		}
-
-		// Send the request and get the response.
-		_, err := o.doGetRequest(statReq)
-		if err != nil {
-			o.prevErr = err
-			return ObjectInfo{}, err
-		}
-	}
-
-	return o.objectInfo, nil
-}
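
Stat can be issued on the same handle before any Read; the first request then carries settingObjectInfo as described above. A hedged sketch, with c being a *minio.Client prepared as in the previous example (imports: log and minio):

// statFirst inspects an object's metadata before deciding to read it.
func statFirst(c *minio.Client) error {
	obj, err := c.GetObject("mybucket", "myobject", minio.GetObjectOptions{})
	if err != nil {
		return err
	}
	defer obj.Close()
	info, err := obj.Stat()
	if err != nil {
		return err
	}
	log.Printf("size=%d content-type=%s etag=%s", info.Size, info.ContentType, info.ETag)
	return nil
}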
-
-// ReadAt reads len(b) bytes from the File starting at byte offset
-// off. It returns the number of bytes read and the error, if any.
-// ReadAt always returns a non-nil error when n < len(b). At end of
-// file, that error is io.EOF.
-func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
-	if o == nil {
-		return 0, ErrInvalidArgument("Object is nil")
-	}
-
-	// Locking.
-	o.mutex.Lock()
-	defer o.mutex.Unlock()
-
-	// prevErr is error which was saved in previous operation.
-	if o.prevErr != nil || o.isClosed {
-		return 0, o.prevErr
-	}
-
-	// Can only compare offsets to size when size has been set.
-	if o.objectInfoSet {
-		// If offset is negative then we return io.EOF.
-		// If offset is greater than or equal to object size we return io.EOF.
-		if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 {
-			return 0, io.EOF
-		}
-	}
-
-	// Create the new readAt request.
-	readAtReq := getRequest{
-		isReadOp:        true,
-		isReadAt:        true,
-		DidOffsetChange: true,       // Offset always changes.
-		beenRead:        o.beenRead, // Set if this is the first request to try and read.
-		Offset:          offset,     // Set the offset.
-		Buffer:          b,
-	}
-
-	// Alert that this is the first request.
-	if !o.isStarted {
-		readAtReq.isFirstReq = true
-	}
-
-	// Send and receive from the first request.
-	response, err := o.doGetRequest(readAtReq)
-	if err != nil && err != io.EOF {
-		// Save the error.
-		o.prevErr = err
-		return response.Size, err
-	}
-	// Bytes read.
-	bytesRead := int64(response.Size)
-	// There is no valid objectInfo yet to compare against for EOF.
-	if !o.objectInfoSet {
-		// Update the currentOffset.
-		o.currOffset += bytesRead
-	} else {
-		// If this was not the first request update
-		// the offsets and compare against objectInfo
-		// for EOF.
-		oerr := o.setOffset(bytesRead)
-		if oerr != nil {
-			o.prevErr = oerr
-			return response.Size, oerr
-		}
-	}
-	return response.Size, err
-}
-
-// Seek sets the offset for the next Read or Write to offset,
-// interpreted according to whence: 0 means relative to the
-// origin of the file, 1 means relative to the current offset,
-// and 2 means relative to the end.
-// Seek returns the new offset and an error, if any.
-//
-// Seeking to a negative offset is an error. Seeking to any positive
-// offset is legal; subsequent io operations succeed as long as the
-// underlying object is not closed.
-func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
-	if o == nil {
-		return 0, ErrInvalidArgument("Object is nil")
-	}
-
-	// Locking.
-	o.mutex.Lock()
-	defer o.mutex.Unlock()
-
-	if o.prevErr != nil {
-		// Seeking at EOF is legal, so allow only io.EOF; for any other error we return.
-		if o.prevErr != io.EOF {
-			return 0, o.prevErr
-		}
-	}
-
-	// Negative offset is valid for whence of '2'.
-	if offset < 0 && whence != 2 {
-		return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence))
-	}
-
-	// If this is the first request, get the ObjectInfo
-	// before anything else.
-	if !o.isStarted || !o.objectInfoSet {
-		// Create the new Seek request.
-		seekReq := getRequest{
-			isReadOp:   false,
-			Offset:     offset,
-			isFirstReq: true,
-		}
-		// Send and receive from the seek request.
-		_, err := o.doGetRequest(seekReq)
-		if err != nil {
-			// Save the error.
-			o.prevErr = err
-			return 0, err
-		}
-	}
-
-	// Switch through whence.
-	switch whence {
-	default:
-		return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
-	case 0:
-		if o.objectInfo.Size > -1 && offset > o.objectInfo.Size {
-			return 0, io.EOF
-		}
-		o.currOffset = offset
-	case 1:
-		if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size {
-			return 0, io.EOF
-		}
-		o.currOffset += offset
-	case 2:
-		// If we don't know the object size return an error for io.SeekEnd
-		if o.objectInfo.Size < 0 {
-			return 0, ErrInvalidArgument("Whence END is not supported when the object size is unknown")
-		}
-		// Seeking to positive offset is valid for whence '2', but
-		// since we are backing a Reader we have reached 'EOF' if
-		// offset is positive.
-		if offset > 0 {
-			return 0, io.EOF
-		}
-		// Seeking to a negative position is not allowed for this whence.
-		if o.objectInfo.Size+offset < 0 {
-			return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
-		}
-		o.currOffset = o.objectInfo.Size + offset
-	}
-	// Reset the saved error since we successfully seeked; let Read
-	// and ReadAt decide.
-	if o.prevErr == io.EOF {
-		o.prevErr = nil
-	}
-
-	// Ask lower level to fetch again from source
-	o.seekData = true
-
-	// Return the effective offset.
-	return o.currOffset, nil
-}
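
Combined with whence '2' (io.SeekEnd), Seek allows reading an object's trailer without downloading the rest. A sketch under the same client assumptions as above (imports: io, io/ioutil, minio):

// readTrailer returns the last n bytes of an object.
func readTrailer(c *minio.Client, bucket, object string, n int64) ([]byte, error) {
	obj, err := c.GetObject(bucket, object, minio.GetObjectOptions{})
	if err != nil {
		return nil, err
	}
	defer obj.Close()
	// Seek relative to the end; the next Read re-fetches at the new
	// offset because seekData is set above.
	if _, err := obj.Seek(-n, io.SeekEnd); err != nil {
		return nil, err
	}
	return ioutil.ReadAll(obj)
}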
-
-// Close - closes the object. After the first call, all subsequent
-// Close() calls return an error.
-func (o *Object) Close() (err error) {
-	if o == nil {
-		return ErrInvalidArgument("Object is nil")
-	}
-	// Locking.
-	o.mutex.Lock()
-	defer o.mutex.Unlock()
-
-	// if already closed return an error.
-	if o.isClosed {
-		return o.prevErr
-	}
-
-	// Close successfully.
-	close(o.doneCh)
-
-	// Save for future operations.
-	errMsg := "Object is already closed. Bad file descriptor."
-	o.prevErr = errors.New(errMsg)
-	// Save here that we closed done channel successfully.
-	o.isClosed = true
-	return nil
-}
-
-// newObject instantiates a new *minio.Object.
-// ObjectInfo is populated later by the first request.
-func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object {
-	return &Object{
-		mutex:  &sync.Mutex{},
-		reqCh:  reqCh,
-		resCh:  resCh,
-		doneCh: doneCh,
-	}
-}
-
-// getObject - retrieve object from Object Storage.
-//
-// Additionally, this function takes range arguments to download the specified
-// range bytes of an object. Setting offset and length = 0 will download the full object.
-//
-// For more information about the HTTP Range header, see
-// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
-func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) {
-	// Validate input arguments.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return nil, ObjectInfo{}, err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return nil, ObjectInfo{}, err
-	}
-
-	// Execute GET on objectName.
-	resp, err := c.executeMethod(ctx, "GET", requestMetadata{
-		bucketName:       bucketName,
-		objectName:       objectName,
-		customHeader:     opts.Header(),
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	if err != nil {
-		return nil, ObjectInfo{}, err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
-			return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
-		}
-	}
-
-	// Trim off the odd double quotes from ETag in the beginning and end.
-	md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
-	md5sum = strings.TrimSuffix(md5sum, "\"")
-
-	// Parse the date.
-	date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
-	if err != nil {
-		msg := "Last-Modified time format not recognized. " + reportIssue
-		return nil, ObjectInfo{}, ErrorResponse{
-			Code:      "InternalError",
-			Message:   msg,
-			RequestID: resp.Header.Get("x-amz-request-id"),
-			HostID:    resp.Header.Get("x-amz-id-2"),
-			Region:    resp.Header.Get("x-amz-bucket-region"),
-		}
-	}
-
-	// Get content-type.
-	contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
-	if contentType == "" {
-		contentType = "application/octet-stream"
-	}
-
-	objectStat := ObjectInfo{
-		ETag:         md5sum,
-		Key:          objectName,
-		Size:         resp.ContentLength,
-		LastModified: date,
-		ContentType:  contentType,
-		// Extract only the relevant header keys describing the object.
-		// following function filters out a list of standard set of keys
-		// which are not part of object metadata.
-		Metadata: extractObjMetadata(resp.Header),
-	}
-
-	reader := resp.Body
-	if opts.Materials != nil {
-		err = opts.Materials.SetupDecryptMode(reader, objectStat.Metadata.Get(amzHeaderIV), objectStat.Metadata.Get(amzHeaderKey))
-		if err != nil {
-			return nil, ObjectInfo{}, err
-		}
-		reader = opts.Materials
-	}
-
-	// Do not close the body here; the caller will close it.
-	return reader, objectStat, nil
-}

+ 0 - 126
vendor/github.com/minio/minio-go/api-get-options.go

@@ -1,126 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"fmt"
-	"net/http"
-	"time"
-
-	"github.com/minio/minio-go/pkg/encrypt"
-)
-
-// GetObjectOptions are used to specify additional headers or options
-// during GET requests.
-type GetObjectOptions struct {
-	headers map[string]string
-
-	Materials encrypt.Materials
-}
-
-// StatObjectOptions are used to specify additional headers or options
-// during GET info/stat requests.
-type StatObjectOptions struct {
-	GetObjectOptions
-}
-
-// Header returns the http.Header representation of the GET options.
-func (o GetObjectOptions) Header() http.Header {
-	headers := make(http.Header, len(o.headers))
-	for k, v := range o.headers {
-		headers.Set(k, v)
-	}
-	return headers
-}
-
-// Set adds a key value pair to the options. The
-// key-value pair will be part of the HTTP GET request
-// headers.
-func (o *GetObjectOptions) Set(key, value string) {
-	if o.headers == nil {
-		o.headers = make(map[string]string)
-	}
-	o.headers[http.CanonicalHeaderKey(key)] = value
-}
-
-// SetMatchETag - set match etag.
-func (o *GetObjectOptions) SetMatchETag(etag string) error {
-	if etag == "" {
-		return ErrInvalidArgument("ETag cannot be empty.")
-	}
-	o.Set("If-Match", "\""+etag+"\"")
-	return nil
-}
-
-// SetMatchETagExcept - set match etag except.
-func (o *GetObjectOptions) SetMatchETagExcept(etag string) error {
-	if etag == "" {
-		return ErrInvalidArgument("ETag cannot be empty.")
-	}
-	o.Set("If-None-Match", "\""+etag+"\"")
-	return nil
-}
-
-// SetUnmodified - set unmodified time since.
-func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error {
-	if modTime.IsZero() {
-		return ErrInvalidArgument("Modified since cannot be empty.")
-	}
-	o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat))
-	return nil
-}
-
-// SetModified - set modified time since.
-func (o *GetObjectOptions) SetModified(modTime time.Time) error {
-	if modTime.IsZero() {
-		return ErrInvalidArgument("Modified since cannot be empty.")
-	}
-	o.Set("If-Modified-Since", modTime.Format(http.TimeFormat))
-	return nil
-}
-
-// SetRange - set the start and end offset of the object to be read.
-// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
-func (o *GetObjectOptions) SetRange(start, end int64) error {
-	switch {
-	case start == 0 && end < 0:
-		// Read last '-end' bytes. `bytes=-N`.
-		o.Set("Range", fmt.Sprintf("bytes=%d", end))
-	case 0 < start && end == 0:
-		// Read everything starting from offset
-		// 'start'. `bytes=N-`.
-		o.Set("Range", fmt.Sprintf("bytes=%d-", start))
-	case 0 <= start && start <= end:
-		// Read everything starting at 'start' till the
-		// 'end'. `bytes=N-M`
-		o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
-	default:
-		// All other cases such as
-		// bytes=-3-
-		// bytes=5-3
-		// bytes=-2-4
-		// bytes=-3-0
-		// bytes=-3--2
-		// are invalid.
-		return ErrInvalidArgument(
-			fmt.Sprintf(
-				"Invalid range specified: start=%d end=%d",
-				start, end))
-	}
-	return nil
-}
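
The three accepted (start, end) shapes map onto the RFC 7233 range forms as shown in the switch above. A small runnable demonstration; note that each SetRange call replaces any previously set Range header:

package main

import (
	"fmt"

	minio "github.com/minio/minio-go"
)

func main() {
	for _, r := range [][2]int64{
		{0, -128}, // last 128 bytes       -> "bytes=-128"
		{4096, 0}, // everything from 4096 -> "bytes=4096-"
		{0, 1023}, // first KiB            -> "bytes=0-1023"
	} {
		opts := minio.GetObjectOptions{}
		if err := opts.SetRange(r[0], r[1]); err != nil {
			fmt.Println("invalid range:", err)
			continue
		}
		fmt.Println(opts.Header().Get("Range"))
	}
}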

+ 0 - 109
vendor/github.com/minio/minio-go/api-get-policy.go

@@ -1,109 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"context"
-	"encoding/json"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-
-	"github.com/minio/minio-go/pkg/policy"
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// GetBucketPolicy - get bucket policy at a given path.
-func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy policy.BucketPolicy, err error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return policy.BucketPolicyNone, err
-	}
-	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
-		return policy.BucketPolicyNone, err
-	}
-	policyInfo, err := c.getBucketPolicy(bucketName)
-	if err != nil {
-		errResponse := ToErrorResponse(err)
-		if errResponse.Code == "NoSuchBucketPolicy" {
-			return policy.BucketPolicyNone, nil
-		}
-		return policy.BucketPolicyNone, err
-	}
-	return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil
-}
-
-// ListBucketPolicies - list all policies for a given prefix and all its children.
-func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return map[string]policy.BucketPolicy{}, err
-	}
-	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
-		return map[string]policy.BucketPolicy{}, err
-	}
-	policyInfo, err := c.getBucketPolicy(bucketName)
-	if err != nil {
-		errResponse := ToErrorResponse(err)
-		if errResponse.Code == "NoSuchBucketPolicy" {
-			return map[string]policy.BucketPolicy{}, nil
-		}
-		return map[string]policy.BucketPolicy{}, err
-	}
-	return policy.GetPolicies(policyInfo.Statements, bucketName), nil
-}
-
-// Default empty bucket access policy.
-var emptyBucketAccessPolicy = policy.BucketAccessPolicy{
-	Version: "2012-10-17",
-}
-
-// Request server for current bucket policy.
-func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, error) {
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-	urlValues.Set("policy", "")
-
-	// Execute GET on bucket to fetch the bucket policy.
-	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
-		bucketName:       bucketName,
-		queryValues:      urlValues,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-
-	defer closeResponse(resp)
-	if err != nil {
-		return emptyBucketAccessPolicy, err
-	}
-
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return emptyBucketAccessPolicy, httpRespToErrorResponse(resp, bucketName, "")
-		}
-	}
-
-	bucketPolicyBuf, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return emptyBucketAccessPolicy, err
-	}
-
-	policy := policy.BucketAccessPolicy{}
-	err = json.Unmarshal(bucketPolicyBuf, &policy)
-	return policy, err
-}
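
A hedged usage sketch for the public GetBucketPolicy wrapper; the returned value is one of the policy.BucketPolicy constants from github.com/minio/minio-go/pkg/policy, e.g. policy.BucketPolicyNone when nothing is set, as handled above (imports: log, minio, policy):

// checkPrefixPolicy reports the effective policy for a prefix; c is a
// *minio.Client configured as in the earlier sketches.
func checkPrefixPolicy(c *minio.Client) error {
	pol, err := c.GetBucketPolicy("mybucket", "public/")
	if err != nil {
		return err
	}
	if pol == policy.BucketPolicyNone {
		log.Println("no policy set for this prefix")
		return nil
	}
	log.Printf("policy for prefix: %v", pol)
	return nil
}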

+ 0 - 717
vendor/github.com/minio/minio-go/api-list.go

@@ -1,717 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// ListBuckets list all buckets owned by this authenticated user.
-//
-// This call requires explicit authentication; no anonymous requests are
-// allowed for listing buckets.
-//
-//   api := client.New(....)
-//   buckets, err := api.ListBuckets()
-//   if err != nil {
-//       fmt.Println(err)
-//       return
-//   }
-//   for _, bucket := range buckets {
-//       fmt.Println(bucket)
-//   }
-//
-func (c Client) ListBuckets() ([]BucketInfo, error) {
-	// Execute GET on service.
-	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{contentSHA256Hex: emptySHA256Hex})
-	defer closeResponse(resp)
-	if err != nil {
-		return nil, err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return nil, httpRespToErrorResponse(resp, "", "")
-		}
-	}
-	listAllMyBucketsResult := listAllMyBucketsResult{}
-	err = xmlDecoder(resp.Body, &listAllMyBucketsResult)
-	if err != nil {
-		return nil, err
-	}
-	return listAllMyBucketsResult.Buckets.Bucket, nil
-}
-
-/// Bucket Read Operations.
-
-// ListObjectsV2 lists all objects matching the objectPrefix from
-// the specified bucket. If recursive is enabled it lists all
-// subdirectories and all their contents.
-//
-// The input parameters are bucketName, objectPrefix, recursive
-// and a done channel for proactively closing the internal go
-// routine. If recursive is 'true' this function returns all the
-// objects in the given bucket name under the given object prefix.
-//
-//   api := client.New(....)
-//   // Create a done channel.
-//   doneCh := make(chan struct{})
-//   defer close(doneCh)
-//   // Recursively list all objects in 'mytestbucket'
-//   recursive := true
-//   for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) {
-//       fmt.Println(message)
-//   }
-//
-func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
-	// Allocate new list objects channel.
-	objectStatCh := make(chan ObjectInfo, 1)
-	// Default listing is delimited at "/"
-	delimiter := "/"
-	if recursive {
-		// If recursive we do not delimit.
-		delimiter = ""
-	}
-
-	// Return object owner information by default
-	fetchOwner := true
-
-	// Validate bucket name.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		defer close(objectStatCh)
-		objectStatCh <- ObjectInfo{
-			Err: err,
-		}
-		return objectStatCh
-	}
-
-	// Validate incoming object prefix.
-	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
-		defer close(objectStatCh)
-		objectStatCh <- ObjectInfo{
-			Err: err,
-		}
-		return objectStatCh
-	}
-
-	// Initiate list objects goroutine here.
-	go func(objectStatCh chan<- ObjectInfo) {
-		defer close(objectStatCh)
-		// Save continuationToken for next request.
-		var continuationToken string
-		for {
-			// Get list of objects a maximum of 1000 per request.
-			result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000)
-			if err != nil {
-				objectStatCh <- ObjectInfo{
-					Err: err,
-				}
-				return
-			}
-
-			// If contents are available loop through and send over channel.
-			for _, object := range result.Contents {
-				select {
-				// Send object content.
-				case objectStatCh <- object:
-				// If receives done from the caller, return here.
-				case <-doneCh:
-					return
-				}
-			}
-
-			// Send all common prefixes if any.
-			// NOTE: prefixes are only present if the request is delimited.
-			for _, obj := range result.CommonPrefixes {
-				select {
-				// Send object prefixes.
-				case objectStatCh <- ObjectInfo{
-					Key:  obj.Prefix,
-					Size: 0,
-				}:
-				// If receives done from the caller, return here.
-				case <-doneCh:
-					return
-				}
-			}
-
-			// If continuation token present, save it for next request.
-			if result.NextContinuationToken != "" {
-				continuationToken = result.NextContinuationToken
-			}
-
-			// Listing ends when the result is not truncated; return right here.
-			if !result.IsTruncated {
-				return
-			}
-		}
-	}(objectStatCh)
-	return objectStatCh
-}
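
Because the goroutine above blocks on sending to objectStatCh or receiving from doneCh, a consumer that stops ranging early must close doneCh or the goroutine leaks. A sketch of collecting only the first n entries safely (client c as in the earlier examples):

// firstN lists at most n objects from a bucket; closing doneCh on return
// unblocks and terminates the internal listing goroutine.
func firstN(c *minio.Client, bucket string, n int) ([]minio.ObjectInfo, error) {
	doneCh := make(chan struct{})
	defer close(doneCh)
	var out []minio.ObjectInfo
	for obj := range c.ListObjectsV2(bucket, "", true, doneCh) {
		if obj.Err != nil {
			return nil, obj.Err
		}
		out = append(out, obj)
		if len(out) == n {
			break // safe: the deferred close(doneCh) stops the goroutine
		}
	}
	return out, nil
}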
-
-// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket.
-//
-// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
-// request parameters :-
-// ---------
-// ?continuation-token - Specifies the key to start with when listing objects in a bucket.
-// ?delimiter - A delimiter is a character you use to group keys.
-// ?prefix - Limits the response to keys that begin with the specified prefix.
-// ?max-keys - Sets the maximum number of keys returned in the response body.
-func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
-	// Validate bucket name.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return ListBucketV2Result{}, err
-	}
-	// Validate object prefix.
-	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
-		return ListBucketV2Result{}, err
-	}
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-
-	// Always set list-type in ListObjects V2
-	urlValues.Set("list-type", "2")
-
-	// Set object prefix.
-	if objectPrefix != "" {
-		urlValues.Set("prefix", objectPrefix)
-	}
-	// Set continuation token
-	if continuationToken != "" {
-		urlValues.Set("continuation-token", continuationToken)
-	}
-	// Set delimiter.
-	if delimiter != "" {
-		urlValues.Set("delimiter", delimiter)
-	}
-
-	// Fetch owner when listing
-	if fetchOwner {
-		urlValues.Set("fetch-owner", "true")
-	}
-
-	// maxkeys should default to 1000 or less.
-	if maxkeys == 0 || maxkeys > 1000 {
-		maxkeys = 1000
-	}
-	// Set max keys.
-	urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
-
-	// Execute GET on bucket to list objects.
-	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
-		bucketName:       bucketName,
-		queryValues:      urlValues,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return ListBucketV2Result{}, err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "")
-		}
-	}
-
-	// Decode list objects result XML.
-	listBucketResult := ListBucketV2Result{}
-	if err = xmlDecoder(resp.Body, &listBucketResult); err != nil {
-		return listBucketResult, err
-	}
-
-	// This is an additional verification check to make
-	// sure proper responses are received.
-	if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" {
-		return listBucketResult, errors.New("Truncated response should have continuation token set")
-	}
-
-	// Success.
-	return listBucketResult, nil
-}
-
-// ListObjects - (List Objects) - List some objects or all recursively.
-//
-// ListObjects lists all objects matching the objectPrefix from
-// the specified bucket. If recursive is enabled it lists all
-// subdirectories and all their contents.
-//
-// The input parameters are bucketName, objectPrefix, recursive
-// and a done channel for proactively closing the internal go
-// routine. If recursive is 'true' this function returns all the
-// objects in the given bucket name under the given object prefix.
-//
-//   api := client.New(....)
-//   // Create a done channel.
-//   doneCh := make(chan struct{})
-//   defer close(doneCh)
-//   // Recursively list all objects in 'mytestbucket'
-//   recursive := true
-//   for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) {
-//       fmt.Println(message)
-//   }
-//
-func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
-	// Allocate new list objects channel.
-	objectStatCh := make(chan ObjectInfo, 1)
-	// Default listing is delimited at "/"
-	delimiter := "/"
-	if recursive {
-		// If recursive we do not delimit.
-		delimiter = ""
-	}
-	// Validate bucket name.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		defer close(objectStatCh)
-		objectStatCh <- ObjectInfo{
-			Err: err,
-		}
-		return objectStatCh
-	}
-	// Validate incoming object prefix.
-	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
-		defer close(objectStatCh)
-		objectStatCh <- ObjectInfo{
-			Err: err,
-		}
-		return objectStatCh
-	}
-
-	// Initiate list objects goroutine here.
-	go func(objectStatCh chan<- ObjectInfo) {
-		defer close(objectStatCh)
-		// Save marker for next request.
-		var marker string
-		for {
-			// Get list of objects a maximum of 1000 per request.
-			result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000)
-			if err != nil {
-				objectStatCh <- ObjectInfo{
-					Err: err,
-				}
-				return
-			}
-
-			// If contents are available loop through and send over channel.
-			for _, object := range result.Contents {
-				// Save the marker.
-				marker = object.Key
-				select {
-				// Send object content.
-				case objectStatCh <- object:
-				// If receives done from the caller, return here.
-				case <-doneCh:
-					return
-				}
-			}
-
-			// Send all common prefixes if any.
-			// NOTE: prefixes are only present if the request is delimited.
-			for _, obj := range result.CommonPrefixes {
-				object := ObjectInfo{}
-				object.Key = obj.Prefix
-				object.Size = 0
-				select {
-				// Send object prefixes.
-				case objectStatCh <- object:
-				// If receives done from the caller, return here.
-				case <-doneCh:
-					return
-				}
-			}
-
-			// If next marker present, save it for next request.
-			if result.NextMarker != "" {
-				marker = result.NextMarker
-			}
-
-			// Listing ends when the result is not truncated; return right here.
-			if !result.IsTruncated {
-				return
-			}
-		}
-	}(objectStatCh)
-	return objectStatCh
-}
-
-// listObjectsQuery - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
-//
-// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
-// request parameters :-
-// ---------
-// ?marker - Specifies the key to start with when listing objects in a bucket.
-// ?delimiter - A delimiter is a character you use to group keys.
-// ?prefix - Limits the response to keys that begin with the specified prefix.
-// ?max-keys - Sets the maximum number of keys returned in the response body.
-func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) {
-	// Validate bucket name.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return ListBucketResult{}, err
-	}
-	// Validate object prefix.
-	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
-		return ListBucketResult{}, err
-	}
-	// Get resources properly escaped and lined up before
-	// using them in http request.
-	urlValues := make(url.Values)
-	// Set object prefix.
-	if objectPrefix != "" {
-		urlValues.Set("prefix", objectPrefix)
-	}
-	// Set object marker.
-	if objectMarker != "" {
-		urlValues.Set("marker", objectMarker)
-	}
-	// Set delimiter.
-	if delimiter != "" {
-		urlValues.Set("delimiter", delimiter)
-	}
-
-	// maxkeys should default to 1000 or less.
-	if maxkeys == 0 || maxkeys > 1000 {
-		maxkeys = 1000
-	}
-	// Set max keys.
-	urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
-
-	// Execute GET on bucket to list objects.
-	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
-		bucketName:       bucketName,
-		queryValues:      urlValues,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return ListBucketResult{}, err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
-		}
-	}
-	// Decode list objects result XML.
-	listBucketResult := ListBucketResult{}
-	err = xmlDecoder(resp.Body, &listBucketResult)
-	if err != nil {
-		return listBucketResult, err
-	}
-	return listBucketResult, nil
-}
-
-// ListIncompleteUploads - List incompletely uploaded multipart objects.
-//
-// ListIncompleteUploads lists all incomplete objects matching the
-// objectPrefix from the specified bucket. If recursive is enabled
-// it lists all subdirectories and all their contents.
-//
-// The input parameters are bucketName, objectPrefix, recursive
-// and a done channel to proactively close the internal go routine.
-// If recursive is 'true' this function returns all the multipart
-// objects in the given bucket name.
-//
-//   api := client.New(....)
-//   // Create a done channel.
-//   doneCh := make(chan struct{})
-//   defer close(doneCh)
-//   // Recursively list all incomplete uploads in 'mytestbucket'
-//   recursive := true
-//   for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive, doneCh) {
-//       fmt.Println(message)
-//   }
-//
-func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
-	// Turn on size aggregation of individual parts.
-	isAggregateSize := true
-	return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh)
-}
-
-// listIncompleteUploads lists all incomplete uploads.
-func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
-	// Allocate channel for multipart uploads.
-	objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
-	// Delimiter is set to "/" by default.
-	delimiter := "/"
-	if recursive {
-		// If recursive do not delimit.
-		delimiter = ""
-	}
-	// Validate bucket name.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		defer close(objectMultipartStatCh)
-		objectMultipartStatCh <- ObjectMultipartInfo{
-			Err: err,
-		}
-		return objectMultipartStatCh
-	}
-	// Validate incoming object prefix.
-	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
-		defer close(objectMultipartStatCh)
-		objectMultipartStatCh <- ObjectMultipartInfo{
-			Err: err,
-		}
-		return objectMultipartStatCh
-	}
-	go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
-		defer close(objectMultipartStatCh)
-		// object and upload ID marker for future requests.
-		var objectMarker string
-		var uploadIDMarker string
-		for {
-			// list all multipart uploads.
-			result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000)
-			if err != nil {
-				objectMultipartStatCh <- ObjectMultipartInfo{
-					Err: err,
-				}
-				return
-			}
-			// Save objectMarker and uploadIDMarker for next request.
-			objectMarker = result.NextKeyMarker
-			uploadIDMarker = result.NextUploadIDMarker
-			// Send all multipart uploads.
-			for _, obj := range result.Uploads {
-				// Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
-				if aggregateSize {
-					// Get total multipart size.
-					obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID)
-					if err != nil {
-						objectMultipartStatCh <- ObjectMultipartInfo{
-							Err: err,
-						}
-						continue
-					}
-				}
-				select {
-				// Send individual uploads here.
-				case objectMultipartStatCh <- obj:
-				// If done channel return here.
-				case <-doneCh:
-					return
-				}
-			}
-			// Send all common prefixes if any.
-			// NOTE: prefixes are only present if the request is delimited.
-			for _, obj := range result.CommonPrefixes {
-				object := ObjectMultipartInfo{}
-				object.Key = obj.Prefix
-				object.Size = 0
-				select {
-				// Send delimited prefixes here.
-				case objectMultipartStatCh <- object:
-				// If done channel return here.
-				case <-doneCh:
-					return
-				}
-			}
-			// Listing ends when the result is not truncated; return right here.
-			if !result.IsTruncated {
-				return
-			}
-		}
-	}(objectMultipartStatCh)
-	// return.
-	return objectMultipartStatCh
-}
-
-// listMultipartUploadsQuery - (List Multipart Uploads).
-//   - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
-//
-// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
-// request parameters. :-
-// ---------
-// ?key-marker - Specifies the multipart upload after which listing should begin.
-// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
-// ?delimiter - A delimiter is a character you use to group keys.
-// ?prefix - Limits the response to keys that begin with the specified prefix.
-// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
-func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) {
-	// Get resources properly escaped and lined up before using them in http request.
-	urlValues := make(url.Values)
-	// Set uploads.
-	urlValues.Set("uploads", "")
-	// Set object key marker.
-	if keyMarker != "" {
-		urlValues.Set("key-marker", keyMarker)
-	}
-	// Set upload id marker.
-	if uploadIDMarker != "" {
-		urlValues.Set("upload-id-marker", uploadIDMarker)
-	}
-	// Set prefix marker.
-	if prefix != "" {
-		urlValues.Set("prefix", prefix)
-	}
-	// Set delimiter.
-	if delimiter != "" {
-		urlValues.Set("delimiter", delimiter)
-	}
-
-	// maxUploads should be 1000 or less.
-	if maxUploads == 0 || maxUploads > 1000 {
-		maxUploads = 1000
-	}
-	// Set max-uploads.
-	urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
-
-	// Execute GET on bucketName to list multipart uploads.
-	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
-		bucketName:       bucketName,
-		queryValues:      urlValues,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return ListMultipartUploadsResult{}, err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
-		}
-	}
-	// Decode response body.
-	listMultipartUploadsResult := ListMultipartUploadsResult{}
-	err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
-	if err != nil {
-		return listMultipartUploadsResult, err
-	}
-	return listMultipartUploadsResult, nil
-}
-
-// listObjectParts lists all object parts recursively.
-func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
-	// Part number marker for the next batch of requests.
-	var nextPartNumberMarker int
-	partsInfo = make(map[int]ObjectPart)
-	for {
-		// Get list of uploaded parts a maximum of 1000 per request.
-		listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
-		if err != nil {
-			return nil, err
-		}
-		// Append to parts info.
-		for _, part := range listObjPartsResult.ObjectParts {
-			// Trim off the odd double quotes from ETag in the beginning and end.
-			part.ETag = strings.TrimPrefix(part.ETag, "\"")
-			part.ETag = strings.TrimSuffix(part.ETag, "\"")
-			partsInfo[part.PartNumber] = part
-		}
-		// Keep part number marker, for the next iteration.
-		nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
-		// Listing ends when the result is not truncated; return right here.
-		if !listObjPartsResult.IsTruncated {
-			break
-		}
-	}
-
-	// Return all the parts.
-	return partsInfo, nil
-}
-
-// findUploadID lists all incomplete uploads and finds the uploadID of the matching object name.
-func (c Client) findUploadID(bucketName, objectName string) (uploadID string, err error) {
-	// Make list incomplete uploads recursive.
-	isRecursive := true
-	// Turn off size aggregation of individual parts, in this request.
-	isAggregateSize := false
-	// latestUpload to track the latest multipart info for objectName.
-	var latestUpload ObjectMultipartInfo
-	// Create done channel to cleanup the routine.
-	doneCh := make(chan struct{})
-	defer close(doneCh)
-	// List all incomplete uploads.
-	for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) {
-		if mpUpload.Err != nil {
-			return "", mpUpload.Err
-		}
-		if objectName == mpUpload.Key {
-			if mpUpload.Initiated.Sub(latestUpload.Initiated) > 0 {
-				latestUpload = mpUpload
-			}
-		}
-	}
-	// Return the latest upload id.
-	return latestUpload.UploadID, nil
-}
-
-// getTotalMultipartSize - calculates the total uploaded size for a given multipart object.
-func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) {
-	// Iterate over all parts and aggregate the size.
-	partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
-	if err != nil {
-		return 0, err
-	}
-	for _, partInfo := range partsInfo {
-		size += partInfo.Size
-	}
-	return size, nil
-}
-
-// listObjectPartsQuery (List Parts query)
-//     - lists some or all (up to 1000) parts that have been uploaded
-//     for a specific multipart upload
-//
-// You can use the request parameters as selection criteria to return
-// a subset of the uploads in a bucket, request parameters :-
-// ---------
-// ?part-number-marker - Specifies the part after which listing should
-// begin.
-// ?max-parts - Maximum parts to be listed per request.
-func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) {
-	// Get resources properly escaped and lined up before using them in http request.
-	urlValues := make(url.Values)
-	// Set part number marker.
-	urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
-	// Set upload id.
-	urlValues.Set("uploadId", uploadID)
-
-	// maxParts should be 1000 or less.
-	if maxParts == 0 || maxParts > 1000 {
-		maxParts = 1000
-	}
-	// Set max parts.
-	urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
-
-	// Execute GET on objectName to get list of parts.
-	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
-		bucketName:       bucketName,
-		objectName:       objectName,
-		queryValues:      urlValues,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return ListObjectPartsResult{}, err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
-		}
-	}
-	// Decode list object parts XML.
-	listObjectPartsResult := ListObjectPartsResult{}
-	err = xmlDecoder(resp.Body, &listObjectPartsResult)
-	if err != nil {
-		return listObjectPartsResult, err
-	}
-	return listObjectPartsResult, nil
-}

+ 0 - 230
vendor/github.com/minio/minio-go/api-notification.go

@@ -1,230 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"bufio"
-	"context"
-	"encoding/json"
-	"io"
-	"net/http"
-	"net/url"
-	"time"
-
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// GetBucketNotification - get bucket notification at a given path.
-func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return BucketNotification{}, err
-	}
-	notification, err := c.getBucketNotification(bucketName)
-	if err != nil {
-		return BucketNotification{}, err
-	}
-	return notification, nil
-}
-
-// Request server for notification rules.
-func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) {
-	urlValues := make(url.Values)
-	urlValues.Set("notification", "")
-
-	// Execute GET on bucket to fetch the notification configuration.
-	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
-		bucketName:       bucketName,
-		queryValues:      urlValues,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-
-	defer closeResponse(resp)
-	if err != nil {
-		return BucketNotification{}, err
-	}
-	return processBucketNotificationResponse(bucketName, resp)
-}
-
-// processes the GetNotification http response from the server.
-func processBucketNotificationResponse(bucketName string, resp *http.Response) (BucketNotification, error) {
-	if resp.StatusCode != http.StatusOK {
-		errResponse := httpRespToErrorResponse(resp, bucketName, "")
-		return BucketNotification{}, errResponse
-	}
-	var bucketNotification BucketNotification
-	err := xmlDecoder(resp.Body, &bucketNotification)
-	if err != nil {
-		return BucketNotification{}, err
-	}
-	return bucketNotification, nil
-}
-
-// identity represents the user ID; this is a compliance field.
-type identity struct {
-	PrincipalID string `json:"principalId"`
-}
-
-// Notification event bucket metadata.
-type bucketMeta struct {
-	Name          string   `json:"name"`
-	OwnerIdentity identity `json:"ownerIdentity"`
-	ARN           string   `json:"arn"`
-}
-
-// Notification event object metadata.
-type objectMeta struct {
-	Key       string `json:"key"`
-	Size      int64  `json:"size,omitempty"`
-	ETag      string `json:"eTag,omitempty"`
-	VersionID string `json:"versionId,omitempty"`
-	Sequencer string `json:"sequencer"`
-}
-
-// Notification event server specific metadata.
-type eventMeta struct {
-	SchemaVersion   string     `json:"s3SchemaVersion"`
-	ConfigurationID string     `json:"configurationId"`
-	Bucket          bucketMeta `json:"bucket"`
-	Object          objectMeta `json:"object"`
-}
-
-// sourceInfo represents information on the client that
-// triggered the event notification.
-type sourceInfo struct {
-	Host      string `json:"host"`
-	Port      string `json:"port"`
-	UserAgent string `json:"userAgent"`
-}
-
-// NotificationEvent represents an Amazon S3 bucket notification event.
-type NotificationEvent struct {
-	EventVersion      string            `json:"eventVersion"`
-	EventSource       string            `json:"eventSource"`
-	AwsRegion         string            `json:"awsRegion"`
-	EventTime         string            `json:"eventTime"`
-	EventName         string            `json:"eventName"`
-	UserIdentity      identity          `json:"userIdentity"`
-	RequestParameters map[string]string `json:"requestParameters"`
-	ResponseElements  map[string]string `json:"responseElements"`
-	S3                eventMeta         `json:"s3"`
-	Source            sourceInfo        `json:"source"`
-}
-
-// NotificationInfo - represents the collection of notification events; it
-// additionally reports any errors encountered while listening on bucket notifications.
-type NotificationInfo struct {
-	Records []NotificationEvent
-	Err     error
-}
-
-// ListenBucketNotification - listen on bucket notifications.
-func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo {
-	notificationInfoCh := make(chan NotificationInfo, 1)
-	// Start a routine to read the streamed response line by line.
-	go func(notificationInfoCh chan<- NotificationInfo) {
-		defer close(notificationInfoCh)
-
-		// Validate the bucket name.
-		if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-			notificationInfoCh <- NotificationInfo{
-				Err: err,
-			}
-			return
-		}
-
-		// Check the endpoint to verify that listening for bucket notifications is supported
-		if s3utils.IsAmazonEndpoint(c.endpointURL) || s3utils.IsGoogleEndpoint(c.endpointURL) {
-			notificationInfoCh <- NotificationInfo{
-				Err: ErrAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"),
-			}
-			return
-		}
-
-		// Continuously run and listen for bucket notifications.
-		// Create a done channel to control the retry go routine.
-		retryDoneCh := make(chan struct{}, 1)
-
-		// Indicate to our routine to exit cleanly upon return.
-		defer close(retryDoneCh)
-
-		// Wait on the jitter retry loop.
-		for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
-			urlValues := make(url.Values)
-			urlValues.Set("prefix", prefix)
-			urlValues.Set("suffix", suffix)
-			urlValues["events"] = events
-
-			// Execute GET on bucket to listen for notifications.
-			resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
-				bucketName:       bucketName,
-				queryValues:      urlValues,
-				contentSHA256Hex: emptySHA256Hex,
-			})
-			if err != nil {
-				notificationInfoCh <- NotificationInfo{
-					Err: err,
-				}
-				return
-			}
-
-			// Validate http response, upon error return quickly.
-			if resp.StatusCode != http.StatusOK {
-				errResponse := httpRespToErrorResponse(resp, bucketName, "")
-				notificationInfoCh <- NotificationInfo{
-					Err: errResponse,
-				}
-				return
-			}
-
-			// Initialize a new bufio scanner, to read line by line.
-			bio := bufio.NewScanner(resp.Body)
-
-			// Close the response body.
-			defer resp.Body.Close()
-
-			// Unmarshal each line into notification values.
-			for bio.Scan() {
-				var notificationInfo NotificationInfo
-				if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
-					continue
-				}
-				// Send notifications on channel only if there are events received.
-				if len(notificationInfo.Records) > 0 {
-					select {
-					case notificationInfoCh <- notificationInfo:
-					case <-doneCh:
-						return
-					}
-				}
-			}
-			// Look for any underlying errors.
-			if err = bio.Err(); err != nil {
-				// For an unexpected connection drop from server, we close the body
-				// and re-connect.
-				if err == io.ErrUnexpectedEOF {
-					resp.Body.Close()
-				}
-			}
-		}
-	}(notificationInfoCh)
-
-	// Returns the notification info channel, for caller to start reading from.
-	return notificationInfoCh
-}
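
A hedged usage sketch; the event names follow the S3 notification convention (e.g. s3:ObjectCreated:*), and closing doneCh ends the stream just as in the listing APIs (imports: log, minio):

// watchBucket logs create/remove events for JPEGs under photos/.
func watchBucket(c *minio.Client) {
	doneCh := make(chan struct{})
	defer close(doneCh)
	events := []string{"s3:ObjectCreated:*", "s3:ObjectRemoved:*"}
	for info := range c.ListenBucketNotification("mybucket", "photos/", ".jpg", events, doneCh) {
		if info.Err != nil {
			log.Println("listen error:", info.Err)
			return
		}
		for _, e := range info.Records {
			log.Printf("%s on %s", e.EventName, e.S3.Object.Key)
		}
	}
}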

+ 0 - 213
vendor/github.com/minio/minio-go/api-presigned.go

@@ -1,213 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"errors"
-	"net/http"
-	"net/url"
-	"time"
-
-	"github.com/minio/minio-go/pkg/s3signer"
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// presignURL - Returns a presigned URL for an input 'method'.
-// The expiry is a maximum of 7 days (604800 seconds) and a minimum of 1 second.
-func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
-	// Input validation.
-	if method == "" {
-		return nil, ErrInvalidArgument("method cannot be empty.")
-	}
-	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
-		return nil, err
-	}
-	if err = isValidExpiry(expires); err != nil {
-		return nil, err
-	}
-
-	// Convert expires into seconds.
-	expireSeconds := int64(expires / time.Second)
-	reqMetadata := requestMetadata{
-		presignURL:  true,
-		bucketName:  bucketName,
-		objectName:  objectName,
-		expires:     expireSeconds,
-		queryValues: reqParams,
-	}
-
-	// Instantiate a new request.
-	// Since expires is set newRequest will presign the request.
-	var req *http.Request
-	if req, err = c.newRequest(method, reqMetadata); err != nil {
-		return nil, err
-	}
-	return req.URL, nil
-}
-
-// PresignedGetObject - Returns a presigned URL to access an object
-// data without credentials. The URL can have a maximum expiry of
-// up to 7 days or a minimum of 1 second. Additionally you can override
-// a set of response headers using the query parameters.
-func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
-	if err = s3utils.CheckValidObjectName(objectName); err != nil {
-		return nil, err
-	}
-	return c.presignURL("GET", bucketName, objectName, expires, reqParams)
-}
-
-// PresignedHeadObject - Returns a presigned URL to access object
-// metadata without credentials. The URL can have a maximum expiry of
-// up to 7 days or a minimum of 1 second. Additionally you can override
-// a set of response headers using the query parameters.
-func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
-	if err = s3utils.CheckValidObjectName(objectName); err != nil {
-		return nil, err
-	}
-	return c.presignURL("HEAD", bucketName, objectName, expires, reqParams)
-}
-
-// PresignedPutObject - Returns a presigned URL to upload an object
-// without credentials. The URL can have a maximum expiry of up to
-// 7 days or a minimum of 1 second.
-func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
-	if err = s3utils.CheckValidObjectName(objectName); err != nil {
-		return nil, err
-	}
-	return c.presignURL("PUT", bucketName, objectName, expires, nil)
-}
-
-// Presign - returns a presigned URL for any http method of your choice
-// along with custom request params. The URL can have a maximum expiry
-// of up to 7 days or a minimum of 1 second.
-func (c Client) Presign(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
-	return c.presignURL(method, bucketName, objectName, expires, reqParams)
-}
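
A hedged sketch of generating a shareable download link; response-* query parameters (here response-content-disposition) are signed into the URL and override the corresponding response headers on the server side (imports: net/url, time, minio):

// shareLink returns a 24-hour presigned GET URL that forces a download filename.
func shareLink(c *minio.Client) (string, error) {
	reqParams := make(url.Values)
	reqParams.Set("response-content-disposition", `attachment; filename="report.pdf"`)
	u, err := c.PresignedGetObject("mybucket", "reports/2018.pdf", 24*time.Hour, reqParams)
	if err != nil {
		return "", err
	}
	return u.String(), nil
}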
-
-// PresignedPostPolicy - Returns the POST URL and the form data needed to upload an object.
-func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
-	// Validate input arguments.
-	if p.expiration.IsZero() {
-		return nil, nil, errors.New("Expiration time must be specified")
-	}
-	if _, ok := p.formData["key"]; !ok {
-		return nil, nil, errors.New("object key must be specified")
-	}
-	if _, ok := p.formData["bucket"]; !ok {
-		return nil, nil, errors.New("bucket name must be specified")
-	}
-
-	bucketName := p.formData["bucket"]
-	// Fetch the bucket location.
-	location, err := c.getBucketLocation(bucketName)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	u, err = c.makeTargetURL(bucketName, "", location, nil)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	// Get credentials from the configured credentials provider.
-	credValues, err := c.credsProvider.Get()
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var (
-		signerType      = credValues.SignerType
-		sessionToken    = credValues.SessionToken
-		accessKeyID     = credValues.AccessKeyID
-		secretAccessKey = credValues.SecretAccessKey
-	)
-
-	if signerType.IsAnonymous() {
-		return nil, nil, ErrInvalidArgument("Presigned operations are not supported for anonymous credentials")
-	}
-
-	// Keep time.
-	t := time.Now().UTC()
-	// Handle signature version '2' here.
-	if signerType.IsV2() {
-		policyBase64 := p.base64()
-		p.formData["policy"] = policyBase64
-		// For Google endpoint set this value to be 'GoogleAccessId'.
-		if s3utils.IsGoogleEndpoint(c.endpointURL) {
-			p.formData["GoogleAccessId"] = accessKeyID
-		} else {
-			// For all other endpoints set this value to be 'AWSAccessKeyId'.
-			p.formData["AWSAccessKeyId"] = accessKeyID
-		}
-		// Sign the policy.
-		p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, secretAccessKey)
-		return u, p.formData, nil
-	}
-
-	// Add date policy.
-	if err = p.addNewPolicy(policyCondition{
-		matchType: "eq",
-		condition: "$x-amz-date",
-		value:     t.Format(iso8601DateFormat),
-	}); err != nil {
-		return nil, nil, err
-	}
-
-	// Add algorithm policy.
-	if err = p.addNewPolicy(policyCondition{
-		matchType: "eq",
-		condition: "$x-amz-algorithm",
-		value:     signV4Algorithm,
-	}); err != nil {
-		return nil, nil, err
-	}
-
-	// Add a credential policy.
-	credential := s3signer.GetCredential(accessKeyID, location, t)
-	if err = p.addNewPolicy(policyCondition{
-		matchType: "eq",
-		condition: "$x-amz-credential",
-		value:     credential,
-	}); err != nil {
-		return nil, nil, err
-	}
-
-	if sessionToken != "" {
-		if err = p.addNewPolicy(policyCondition{
-			matchType: "eq",
-			condition: "$x-amz-security-token",
-			value:     sessionToken,
-		}); err != nil {
-			return nil, nil, err
-		}
-	}
-
-	// Get base64 encoded policy.
-	policyBase64 := p.base64()
-
-	// Fill in the form data.
-	p.formData["policy"] = policyBase64
-	p.formData["x-amz-algorithm"] = signV4Algorithm
-	p.formData["x-amz-credential"] = credential
-	p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
-	if sessionToken != "" {
-		p.formData["x-amz-security-token"] = sessionToken
-	}
-	p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location)
-	return u, p.formData, nil
-}

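For reference, a minimal sketch of how the presign API removed above was typically driven. The endpoint, credentials and bucket names are placeholders, and minio.New(endpoint, accessKey, secretKey, secure) is assumed to be the client constructor of this minio-go generation:

package main

import (
	"log"
	"net/url"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	// Override the download filename via response-header query parameters.
	reqParams := make(url.Values)
	reqParams.Set("response-content-disposition", `attachment; filename="report.csv"`)

	// Expiry must be between 1 second and 7 days.
	u, err := c.PresignedGetObject("mybucket", "report.csv", 24*time.Hour, reqParams)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("presigned GET:", u)
}
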
+ 0 - 255
vendor/github.com/minio/minio-go/api-put-bucket.go

@@ -1,255 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"encoding/xml"
-	"fmt"
-	"net/http"
-	"net/url"
-
-	"github.com/minio/minio-go/pkg/policy"
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-/// Bucket operations
-
-// MakeBucket creates a new bucket with bucketName.
-//
-// Location is an optional argument; by default all buckets are
-// created in the US Standard region.
-//
-// For regions supported by Amazon S3, see http://docs.aws.amazon.com/general/latest/gr/rande.html
-// For regions supported by Google Cloud Storage, see https://cloud.google.com/storage/docs/bucket-locations
-func (c Client) MakeBucket(bucketName string, location string) (err error) {
-	defer func() {
-		// Save the location into cache on a successful makeBucket response.
-		if err == nil {
-			c.bucketLocCache.Set(bucketName, location)
-		}
-	}()
-
-	// Validate the input arguments.
-	if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
-		return err
-	}
-
-	// If location is empty, treat it as the default region 'us-east-1'.
-	if location == "" {
-		location = "us-east-1"
-		// For clients configured with a custom region, default
-		// to that region instead of 'us-east-1'.
-		if c.region != "" {
-			location = c.region
-		}
-	}
-	// PUT bucket request metadata.
-	reqMetadata := requestMetadata{
-		bucketName:     bucketName,
-		bucketLocation: location,
-	}
-
-	// If location is not 'us-east-1' create bucket location config.
-	if location != "us-east-1" && location != "" {
-		createBucketConfig := createBucketConfiguration{}
-		createBucketConfig.Location = location
-		var createBucketConfigBytes []byte
-		createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
-		if err != nil {
-			return err
-		}
-		reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes)
-		reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes)
-		reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
-		reqMetadata.contentLength = int64(len(createBucketConfigBytes))
-	}
-
-	// Execute PUT to create a new bucket.
-	resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return httpRespToErrorResponse(resp, bucketName, "")
-		}
-	}
-
-	// Success.
-	return nil
-}
-
-// SetBucketPolicy sets the access permissions on an existing bucket.
-//
-// For example
-//
-//  none - owner gets full access [default].
-//  readonly - anonymous get access for everyone at a given object prefix.
-//  readwrite - anonymous list/put/delete access to a given object prefix.
-//  writeonly - anonymous put/delete access to a given object prefix.
-func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy policy.BucketPolicy) error {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
-		return err
-	}
-
-	if !bucketPolicy.IsValidBucketPolicy() {
-		return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
-	}
-
-	policyInfo, err := c.getBucketPolicy(bucketName)
-	errResponse := ToErrorResponse(err)
-	if err != nil && errResponse.Code != "NoSuchBucketPolicy" {
-		return err
-	}
-
-	if bucketPolicy == policy.BucketPolicyNone && policyInfo.Statements == nil {
-		// As the request is for removing policy and the bucket
-		// has empty policy statements, just return success.
-		return nil
-	}
-
-	policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, bucketPolicy, bucketName, objectPrefix)
-
-	// Save the updated policies.
-	return c.putBucketPolicy(bucketName, policyInfo)
-}
-
-// Saves a new bucket policy.
-func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAccessPolicy) error {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-
-	// If there are no policy statements, we should remove the entire policy.
-	if len(policyInfo.Statements) == 0 {
-		return c.removeBucketPolicy(bucketName)
-	}
-
-	// Get resources properly escaped and lined up before
-	// using them in the http request.
-	urlValues := make(url.Values)
-	urlValues.Set("policy", "")
-
-	policyBytes, err := json.Marshal(&policyInfo)
-	if err != nil {
-		return err
-	}
-
-	policyBuffer := bytes.NewReader(policyBytes)
-	reqMetadata := requestMetadata{
-		bucketName:       bucketName,
-		queryValues:      urlValues,
-		contentBody:      policyBuffer,
-		contentLength:    int64(len(policyBytes)),
-		contentMD5Base64: sumMD5Base64(policyBytes),
-		contentSHA256Hex: sum256Hex(policyBytes),
-	}
-
-	// Execute PUT to upload a new bucket policy.
-	resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusNoContent {
-			return httpRespToErrorResponse(resp, bucketName, "")
-		}
-	}
-	return nil
-}
-
-// Removes all policies on a bucket.
-func (c Client) removeBucketPolicy(bucketName string) error {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-	// Get resources properly escaped and lined up before
-	// using them in the http request.
-	urlValues := make(url.Values)
-	urlValues.Set("policy", "")
-
-	// Execute DELETE on the bucket to remove its policy.
-	resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
-		bucketName:       bucketName,
-		queryValues:      urlValues,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// SetBucketNotification saves a new bucket notification.
-func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-
-	// Get resources properly escaped and lined up before
-	// using them in the http request.
-	urlValues := make(url.Values)
-	urlValues.Set("notification", "")
-
-	notifBytes, err := xml.Marshal(bucketNotification)
-	if err != nil {
-		return err
-	}
-
-	notifBuffer := bytes.NewReader(notifBytes)
-	reqMetadata := requestMetadata{
-		bucketName:       bucketName,
-		queryValues:      urlValues,
-		contentBody:      notifBuffer,
-		contentLength:    int64(len(notifBytes)),
-		contentMD5Base64: sumMD5Base64(notifBytes),
-		contentSHA256Hex: sum256Hex(notifBytes),
-	}
-
-	// Execute PUT to upload a new bucket notification.
-	resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return httpRespToErrorResponse(resp, bucketName, "")
-		}
-	}
-	return nil
-}
-
-// RemoveAllBucketNotification - Removes the bucket notification, clearing all previously specified configuration.
-func (c Client) RemoveAllBucketNotification(bucketName string) error {
-	return c.SetBucketNotification(bucketName, BucketNotification{})
-}

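A usage sketch for the bucket operations removed above, with placeholder endpoint, credentials and bucket names; the policy constant comes from the pkg/policy package this file imports:

package main

import (
	"log"

	"github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/policy"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	// An empty location falls back to "us-east-1" (or the client's custom region).
	if err := c.MakeBucket("mybucket", ""); err != nil {
		log.Fatal(err)
	}

	// Grant anonymous read access to objects under the "public" prefix.
	if err := c.SetBucketPolicy("mybucket", "public", policy.BucketPolicyReadOnly); err != nil {
		log.Fatal(err)
	}
}
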
+ 0 - 111
vendor/github.com/minio/minio-go/api-put-object-common.go

@@ -1,111 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"context"
-	"io"
-	"math"
-	"os"
-
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// Verify if reader is *minio.Object
-func isObject(reader io.Reader) (ok bool) {
-	_, ok = reader.(*Object)
-	return
-}
-
-// Verify if reader is a generic ReaderAt
-func isReadAt(reader io.Reader) (ok bool) {
-	_, ok = reader.(io.ReaderAt)
-	if ok {
-		var v *os.File
-		v, ok = reader.(*os.File)
-		if ok {
-			// Stdin, Stdout and Stderr are all of type *os.File,
-			// which happens to also be io.ReaderAt compatible,
-			// so we need special conditions for them to be
-			// ignored by this function.
-			for _, f := range []string{
-				"/dev/stdin",
-				"/dev/stdout",
-				"/dev/stderr",
-			} {
-				if f == v.Name() {
-					ok = false
-					break
-				}
-			}
-		}
-	}
-	return
-}
-
-// optimalPartInfo - calculate the optimal part info for a given
-// object size.
-//
-// NOTE: The assumption here is that for any object uploaded to any S3-compatible
-// object storage, the following parameters are constants.
-//
-//  maxPartsCount - 10000
-//  minPartSize - 64MiB
-//  maxMultipartPutObjectSize - 5TiB
-//
-func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
-	// If object size is '-1', set it to the 5TiB maximum.
-	if objectSize == -1 {
-		objectSize = maxMultipartPutObjectSize
-	}
-	// Error out if the object size is larger than the supported maximum.
-	if objectSize > maxMultipartPutObjectSize {
-		err = ErrEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
-		return
-	}
-	// Use floats for part size for all calculations to avoid
-	// overflows during float64 to int64 conversions.
-	partSizeFlt := math.Ceil(float64(objectSize) / float64(maxPartsCount))
-	partSizeFlt = math.Ceil(partSizeFlt/minPartSize) * minPartSize
-	// Total parts count.
-	totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
-	// Part size.
-	partSize = int64(partSizeFlt)
-	// Last part size.
-	lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
-	return totalPartsCount, partSize, lastPartSize, nil
-}
-
-// newUploadID - initiates a new multipart upload for an object name
-// and returns the newly allocated upload id.
-func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return "", err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return "", err
-	}
-
-	// Initiate multipart upload for an object.
-	initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts)
-	if err != nil {
-		return "", err
-	}
-	return initMultipartUploadResult.UploadID, nil
-}

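The part-size arithmetic in optimalPartInfo above can be checked in isolation. A standalone sketch using the two constants the comment names (10000 parts, 64MiB minimum part size); the constant names here are local stand-ins, not the library's own:

package main

import (
	"fmt"
	"math"
)

const (
	maxPartsCount = 10000            // S3 part count limit
	minPartSize   = 64 * 1024 * 1024 // 64MiB
)

func main() {
	objectSize := int64(10) << 30 // 10GiB

	// Smallest multiple of minPartSize that fits the object
	// into at most maxPartsCount parts.
	partSize := int64(math.Ceil(math.Ceil(float64(objectSize)/maxPartsCount)/minPartSize) * minPartSize)
	totalParts := int(math.Ceil(float64(objectSize) / float64(partSize)))
	lastPart := objectSize - int64(totalParts-1)*partSize

	fmt.Println(totalParts, partSize, lastPart) // 160 67108864 67108864
}
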
+ 0 - 39
vendor/github.com/minio/minio-go/api-put-object-context.go

@@ -1,39 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"context"
-	"io"
-)
-
-// PutObjectWithContext - Identical to PutObject call, but accepts context to facilitate request cancellation.
-func (c Client) PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
-	opts PutObjectOptions) (n int64, err error) {
-	err = opts.validate()
-	if err != nil {
-		return 0, err
-	}
-	if opts.EncryptMaterials != nil {
-		if err = opts.EncryptMaterials.SetupEncryptMode(reader); err != nil {
-			return 0, err
-		}
-		return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, opts.EncryptMaterials, opts)
-	}
-	return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts)
-}

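A short sketch of the context-aware call removed above, with placeholder names; the timeout cancels an upload that stalls:

package main

import (
	"context"
	"log"
	"os"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Open("backup.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	st, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	// Abort the upload if it has not finished within ten minutes.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()

	n, err := c.PutObjectWithContext(ctx, "mybucket", "backup.tar", f, st.Size(),
		minio.PutObjectOptions{ContentType: "application/x-tar"})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("uploaded", n, "bytes")
}
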
+ 0 - 23
vendor/github.com/minio/minio-go/api-put-object-copy.go

@@ -1,23 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-// CopyObject - copy a source object into a new object
-func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error {
-	return c.ComposeObject(dst, []SourceInfo{src})
-}

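CopyObject above delegates to ComposeObject with a single source. A hedged sketch, assuming the NewSourceInfo/NewDestinationInfo constructors of this minio-go generation (nil means no SSE-C keys and no metadata overrides):

package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	// Server-side copy: no object data flows through the client.
	src := minio.NewSourceInfo("srcbucket", "src.txt", nil)
	dst, err := minio.NewDestinationInfo("dstbucket", "dst.txt", nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	if err := c.CopyObject(dst, src); err != nil {
		log.Fatal(err)
	}
}
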
+ 0 - 44
vendor/github.com/minio/minio-go/api-put-object-encrypted.go

@@ -1,44 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"context"
-	"io"
-
-	"github.com/minio/minio-go/pkg/encrypt"
-)
-
-// PutEncryptedObject - Encrypt and store object.
-func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials) (n int64, err error) {
-
-	if encryptMaterials == nil {
-		return 0, ErrInvalidArgument("Unable to recognize empty encryption properties")
-	}
-
-	if err := encryptMaterials.SetupEncryptMode(reader); err != nil {
-		return 0, err
-	}
-
-	return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, -1, PutObjectOptions{EncryptMaterials: encryptMaterials})
-}
-
-// FPutEncryptedObject - Encrypt and store an object with contents from file at filePath.
-func (c Client) FPutEncryptedObject(bucketName, objectName, filePath string, encryptMaterials encrypt.Materials) (n int64, err error) {
-	return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, PutObjectOptions{EncryptMaterials: encryptMaterials})
-}

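A hedged sketch of client-side encryption with the API above, assuming the symmetric-key helpers in pkg/encrypt of this era (NewSymmetricKey and NewCBCSecureMaterials); the 32-byte key is a placeholder:

package main

import (
	"log"
	"os"

	"github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder 32-byte symmetric key.
	key := encrypt.NewSymmetricKey([]byte("32byteslongsecretkeymustprovided"))
	materials, err := encrypt.NewCBCSecureMaterials(key)
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Open("secret.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// The object is encrypted client-side before upload.
	if _, err := c.PutEncryptedObject("mybucket", "secret.txt", f, materials); err != nil {
		log.Fatal(err)
	}
}
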
+ 0 - 64
vendor/github.com/minio/minio-go/api-put-object-file-context.go

@@ -1,64 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"context"
-	"mime"
-	"os"
-	"path/filepath"
-
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// FPutObjectWithContext - Create an object in a bucket, with contents from file at filePath. Allows request cancellation.
-func (c Client) FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return 0, err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return 0, err
-	}
-
-	// Open the referenced file.
-	fileReader, err := os.Open(filePath)
-	// On any error, fail quickly here.
-	if err != nil {
-		return 0, err
-	}
-	defer fileReader.Close()
-
-	// Save the file stat.
-	fileStat, err := fileReader.Stat()
-	if err != nil {
-		return 0, err
-	}
-
-	// Save the file size.
-	fileSize := fileStat.Size()
-
-	// If no content type was given, set it based on the filepath extension,
-	// defaulting to "application/octet-stream" if the extension has no associated type.
-	if opts.ContentType == "" {
-		if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" {
-			opts.ContentType = "application/octet-stream"
-		}
-	}
-	return c.PutObjectWithContext(ctx, bucketName, objectName, fileReader, fileSize, opts)
-}

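The content-type fallback above is plain standard-library behaviour and easy to verify on its own; contentTypeFor is a hypothetical helper mirroring the logic:

package main

import (
	"fmt"
	"mime"
	"path/filepath"
)

// contentTypeFor mirrors the fallback above: extension lookup
// first, then the generic binary type.
func contentTypeFor(path string) string {
	if ct := mime.TypeByExtension(filepath.Ext(path)); ct != "" {
		return ct
	}
	return "application/octet-stream"
}

func main() {
	fmt.Println(contentTypeFor("photo.png")) // image/png
	fmt.Println(contentTypeFor("data.bin"))  // application/octet-stream (typically)
}
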
+ 0 - 27
vendor/github.com/minio/minio-go/api-put-object-file.go

@@ -1,27 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"context"
-)
-
-// FPutObject - Create an object in a bucket, with contents from file at filePath
-func (c Client) FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) {
-	return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, opts)
-}

+ 0 - 373
vendor/github.com/minio/minio-go/api-put-object-multipart.go

@@ -1,373 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"bytes"
-	"context"
-	"encoding/base64"
-	"encoding/hex"
-	"encoding/xml"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"runtime/debug"
-	"sort"
-	"strconv"
-	"strings"
-
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
-	opts PutObjectOptions) (n int64, err error) {
-	n, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
-	if err != nil {
-		errResp := ToErrorResponse(err)
-		// Verify if multipart functionality is not available, if not
-		// fall back to single PutObject operation.
-		if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
-			// Verify if size of reader is greater than '5GiB'.
-			if size > maxSinglePutObjectSize {
-				return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
-			}
-			// Fall back to uploading as single PutObject operation.
-			return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
-		}
-	}
-	return n, err
-}
-
-func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) {
-	// Input validation.
-	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
-		return 0, err
-	}
-	if err = s3utils.CheckValidObjectName(objectName); err != nil {
-		return 0, err
-	}
-
-	// Total data read and written to the server; should be equal to
-	// 'size' at the end of the call.
-	var totalUploadedSize int64
-
-	// Complete multipart upload.
-	var complMultipartUpload completeMultipartUpload
-
-	// Calculate the optimal parts info for a given size.
-	totalPartsCount, partSize, _, err := optimalPartInfo(-1)
-	if err != nil {
-		return 0, err
-	}
-
-	// Initiate a new multipart upload.
-	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
-	if err != nil {
-		return 0, err
-	}
-
-	defer func() {
-		if err != nil {
-			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
-		}
-	}()
-
-	// Part number always starts with '1'.
-	partNumber := 1
-
-	// Initialize parts uploaded map.
-	partsInfo := make(map[int]ObjectPart)
-
-	// Create a buffer.
-	buf := make([]byte, partSize)
-	defer debug.FreeOSMemory()
-
-	for partNumber <= totalPartsCount {
-		// Choose the hash algorithms to be calculated, avoiding
-		// sha256 for non-v4 signature requests or HTTPS
-		// connections.
-		hashAlgos, hashSums := c.hashMaterials()
-
-		length, rErr := io.ReadFull(reader, buf)
-		if rErr == io.EOF {
-			break
-		}
-		if rErr != nil && rErr != io.ErrUnexpectedEOF {
-			return 0, rErr
-		}
-
-		// Calculate the hash sums of the bytes just read.
-		for k, v := range hashAlgos {
-			v.Write(buf[:length])
-			hashSums[k] = v.Sum(nil)
-		}
-
-		// Update progress reader appropriately to the latest offset
-		// as we read from the source.
-		rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
-
-		// Checksums..
-		var (
-			md5Base64 string
-			sha256Hex string
-		)
-		if hashSums["md5"] != nil {
-			md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"])
-		}
-		if hashSums["sha256"] != nil {
-			sha256Hex = hex.EncodeToString(hashSums["sha256"])
-		}
-
-		// Proceed to upload the part.
-		var objPart ObjectPart
-		objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
-			md5Base64, sha256Hex, int64(length), opts.UserMetadata)
-		if err != nil {
-			return totalUploadedSize, err
-		}
-
-		// Save successfully uploaded part metadata.
-		partsInfo[partNumber] = objPart
-
-		// Save successfully uploaded size.
-		totalUploadedSize += int64(length)
-
-		// Increment part number.
-		partNumber++
-
-		// For unknown size, break out once we read an EOF;
-		// we do not have to upload up to totalPartsCount.
-		if rErr == io.EOF {
-			break
-		}
-	}
-
-	// Loop over total uploaded parts to save them in
-	// Parts array before completing the multipart request.
-	for i := 1; i < partNumber; i++ {
-		part, ok := partsInfo[i]
-		if !ok {
-			return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
-		}
-		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
-			ETag:       part.ETag,
-			PartNumber: part.PartNumber,
-		})
-	}
-
-	// Sort all completed parts.
-	sort.Sort(completedParts(complMultipartUpload.Parts))
-	if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil {
-		return totalUploadedSize, err
-	}
-
-	// Return final size.
-	return totalUploadedSize, nil
-}
-
-// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
-func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return initiateMultipartUploadResult{}, err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return initiateMultipartUploadResult{}, err
-	}
-
-	// Initialize url queries.
-	urlValues := make(url.Values)
-	urlValues.Set("uploads", "")
-
-	// Set ContentType header.
-	customHeader := opts.Header()
-
-	reqMetadata := requestMetadata{
-		bucketName:   bucketName,
-		objectName:   objectName,
-		queryValues:  urlValues,
-		customHeader: customHeader,
-	}
-
-	// Execute POST on an objectName to initiate multipart upload.
-	resp, err := c.executeMethod(ctx, "POST", reqMetadata)
-	defer closeResponse(resp)
-	if err != nil {
-		return initiateMultipartUploadResult{}, err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
-		}
-	}
-	// Decode xml for new multipart upload.
-	initiateMultipartUploadResult := initiateMultipartUploadResult{}
-	err = xmlDecoder(resp.Body, &initiateMultipartUploadResult)
-	if err != nil {
-		return initiateMultipartUploadResult, err
-	}
-	return initiateMultipartUploadResult, nil
-}
-
-const serverEncryptionKeyPrefix = "x-amz-server-side-encryption"
-
-// uploadPart - Uploads a part in a multipart upload.
-func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader,
-	partNumber int, md5Base64, sha256Hex string, size int64, metadata map[string]string) (ObjectPart, error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return ObjectPart{}, err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return ObjectPart{}, err
-	}
-	if size > maxPartSize {
-		return ObjectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName)
-	}
-	if size <= -1 {
-		return ObjectPart{}, ErrEntityTooSmall(size, bucketName, objectName)
-	}
-	if partNumber <= 0 {
-		return ObjectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.")
-	}
-	if uploadID == "" {
-		return ObjectPart{}, ErrInvalidArgument("UploadID cannot be empty.")
-	}
-
-	// Get resources properly escaped and lined up before using them in http request.
-	urlValues := make(url.Values)
-	// Set part number.
-	urlValues.Set("partNumber", strconv.Itoa(partNumber))
-	// Set upload id.
-	urlValues.Set("uploadId", uploadID)
-
-	// Set encryption headers, if any.
-	customHeader := make(http.Header)
-	for k, v := range metadata {
-		if len(v) > 0 {
-			if strings.HasPrefix(strings.ToLower(k), serverEncryptionKeyPrefix) {
-				customHeader.Set(k, v)
-			}
-		}
-	}
-
-	reqMetadata := requestMetadata{
-		bucketName:       bucketName,
-		objectName:       objectName,
-		queryValues:      urlValues,
-		customHeader:     customHeader,
-		contentBody:      reader,
-		contentLength:    size,
-		contentMD5Base64: md5Base64,
-		contentSHA256Hex: sha256Hex,
-	}
-
-	// Execute PUT on each part.
-	resp, err := c.executeMethod(ctx, "PUT", reqMetadata)
-	defer closeResponse(resp)
-	if err != nil {
-		return ObjectPart{}, err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
-		}
-	}
-	// Once successfully uploaded, return completed part.
-	objPart := ObjectPart{}
-	objPart.Size = size
-	objPart.PartNumber = partNumber
-	// Trim the surrounding double quotes from the ETag.
-	objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
-	objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"")
-	return objPart, nil
-}
-
-// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
-func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
-	complete completeMultipartUpload) (completeMultipartUploadResult, error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return completeMultipartUploadResult{}, err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return completeMultipartUploadResult{}, err
-	}
-
-	// Initialize url queries.
-	urlValues := make(url.Values)
-	urlValues.Set("uploadId", uploadID)
-	// Marshal complete multipart body.
-	completeMultipartUploadBytes, err := xml.Marshal(complete)
-	if err != nil {
-		return completeMultipartUploadResult{}, err
-	}
-
-	// Instantiate all the complete multipart buffer.
-	completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
-	reqMetadata := requestMetadata{
-		bucketName:       bucketName,
-		objectName:       objectName,
-		queryValues:      urlValues,
-		contentBody:      completeMultipartUploadBuffer,
-		contentLength:    int64(len(completeMultipartUploadBytes)),
-		contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
-	}
-
-	// Execute POST to complete multipart upload for an objectName.
-	resp, err := c.executeMethod(ctx, "POST", reqMetadata)
-	defer closeResponse(resp)
-	if err != nil {
-		return completeMultipartUploadResult{}, err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
-		}
-	}
-
-	// Read resp.Body into a []bytes to parse for Error response inside the body
-	var b []byte
-	b, err = ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return completeMultipartUploadResult{}, err
-	}
-	// Decode completed multipart upload response on success.
-	completeMultipartUploadResult := completeMultipartUploadResult{}
-	err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
-	if err != nil {
-		// xml parsing failure due to the presence of an ill-formed xml fragment
-		return completeMultipartUploadResult, err
-	} else if completeMultipartUploadResult.Bucket == "" {
-		// xml's Decode method ignores well-formed xml that doesn't apply to the type of value supplied.
-		// In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
-		// of its members.
-
-		// Decode completed multipart upload response on failure
-		completeMultipartUploadErr := ErrorResponse{}
-		err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
-		if err != nil {
-			// xml parsing failure due to the presence of an ill-formed xml fragment
-			return completeMultipartUploadResult, err
-		}
-		return completeMultipartUploadResult, completeMultipartUploadErr
-	}
-	return completeMultipartUploadResult, nil
-}

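The read loop in putObjectMultipartNoStream above leans on io.ReadFull's two EOF flavours: io.EOF means nothing was read (the previous part was the last), io.ErrUnexpectedEOF means a final short part. A standalone sketch of that pattern, with a small buffer standing in for the 64MiB part buffer:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	reader := strings.NewReader("payload of a priori unknown length")
	buf := make([]byte, 8) // stand-in for the 64MiB part buffer

	for part := 1; ; part++ {
		n, err := io.ReadFull(reader, buf)
		if err == io.EOF {
			break // nothing read; the previous part was the last one
		}
		if err != nil && err != io.ErrUnexpectedEOF {
			panic(err)
		}
		fmt.Printf("part %d: %q\n", part, buf[:n])
		if err == io.ErrUnexpectedEOF {
			break // short read: the stream is exhausted
		}
	}
}
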
+ 0 - 417
vendor/github.com/minio/minio-go/api-put-object-streaming.go

@@ -1,417 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"net/http"
-	"sort"
-	"strings"
-
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// putObjectMultipartStream - upload a large object using
-// multipart upload and streaming signature for signing payload.
-// Comprehensive put object operation involving multipart uploads.
-//
-// The following code handles these types of readers.
-//
-//  - *minio.Object
-//  - Any reader which has a method 'ReadAt()'
-//
-func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
-	reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
-
-	if !isObject(reader) && isReadAt(reader) {
-		// If the reader implements ReadAt and is not a *minio.Object, use the parallel uploader.
-		n, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
-	} else {
-		n, err = c.putObjectMultipartStreamNoChecksum(ctx, bucketName, objectName, reader, size, opts)
-	}
-	if err != nil {
-		errResp := ToErrorResponse(err)
-		// If multipart functionality is not available, fall back
-		// to a single PutObject operation.
-		if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
-			// Verify if size of reader is greater than '5GiB'.
-			if size > maxSinglePutObjectSize {
-				return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
-			}
-			// Fall back to uploading as single PutObject operation.
-			return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
-		}
-	}
-	return n, err
-}
-
-// uploadedPartRes - the response received from a part upload.
-type uploadedPartRes struct {
-	Error   error // Any error encountered while uploading the part.
-	PartNum int   // Number of the part uploaded.
-	Size    int64 // Size of the part uploaded.
-	Part    *ObjectPart
-}
-
-type uploadPartReq struct {
-	PartNum int         // Number of the part uploaded.
-	Part    *ObjectPart // Part metadata, filled in after a successful upload.
-}
-
-// putObjectMultipartStreamFromReadAt - Uploads files bigger than 64MiB.
-// Supports all readers which implement the io.ReaderAt interface
-// (ReadAt method).
-//
-// NOTE: This function is meant to be used for all readers which
-// implement io.ReaderAt, which allows parts to be read at an offset
-// and thereby avoids re-reading data which was already uploaded.
-func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
-	reader io.ReaderAt, size int64, opts PutObjectOptions) (n int64, err error) {
-	// Input validation.
-	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
-		return 0, err
-	}
-	if err = s3utils.CheckValidObjectName(objectName); err != nil {
-		return 0, err
-	}
-
-	// Calculate the optimal parts info for a given size.
-	totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
-	if err != nil {
-		return 0, err
-	}
-
-	// Initiate a new multipart upload.
-	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
-	if err != nil {
-		return 0, err
-	}
-
-	// Abort the multipart upload in progress if the function
-	// returns any error; since we do not resume, we should purge
-	// the parts which have been uploaded to relinquish storage
-	// space.
-	defer func() {
-		if err != nil {
-			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
-		}
-	}()
-
-	// Total data read and written to the server; should be equal to 'size' at the end of the call.
-	var totalUploadedSize int64
-
-	// Complete multipart upload.
-	var complMultipartUpload completeMultipartUpload
-
-	// Declare a channel that sends the next part number to be uploaded.
-	// Buffered to 10000 because that's the maximum number of parts allowed
-	// by S3.
-	uploadPartsCh := make(chan uploadPartReq, 10000)
-
-	// Declare a channel that sends back the response of a part upload.
-	// Buffered to 10000 because that's the maximum number of parts allowed
-	// by S3.
-	uploadedPartsCh := make(chan uploadedPartRes, 10000)
-
-	// Used for readability, lastPartNumber is always totalPartsCount.
-	lastPartNumber := totalPartsCount
-
-	// Send each part number to the channel to be processed.
-	for p := 1; p <= totalPartsCount; p++ {
-		uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
-	}
-	close(uploadPartsCh)
-	// Receive each part number from the channel, allowing up to opts.getNumThreads() parallel uploads.
-	for w := 1; w <= opts.getNumThreads(); w++ {
-		go func(partSize int64) {
-			// Each worker will draw from the part channel and upload in parallel.
-			for uploadReq := range uploadPartsCh {
-
-				// The read offset of each part is a multiple of
-				// partSize.
-				readOffset := int64(uploadReq.PartNum-1) * partSize
-
-				// As a special case if partNumber is lastPartNumber, we
-				// calculate the offset based on the last part size.
-				if uploadReq.PartNum == lastPartNumber {
-					readOffset = (size - lastPartSize)
-					partSize = lastPartSize
-				}
-
-				// Get a section reader on a particular offset.
-				sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
-
-				// Proceed to upload the part.
-				var objPart ObjectPart
-				objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID,
-					sectionReader, uploadReq.PartNum,
-					"", "", partSize, opts.UserMetadata)
-				if err != nil {
-					uploadedPartsCh <- uploadedPartRes{
-						Size:  0,
-						Error: err,
-					}
-					// Exit the goroutine.
-					return
-				}
-
-				// Save successfully uploaded part metadata.
-				uploadReq.Part = &objPart
-
-				// Send successful part info through the channel.
-				uploadedPartsCh <- uploadedPartRes{
-					Size:    objPart.Size,
-					PartNum: uploadReq.PartNum,
-					Part:    uploadReq.Part,
-					Error:   nil,
-				}
-			}
-		}(partSize)
-	}
-
-	// Gather the responses as they occur and update any
-	// progress bar.
-	for u := 1; u <= totalPartsCount; u++ {
-		uploadRes := <-uploadedPartsCh
-		if uploadRes.Error != nil {
-			return totalUploadedSize, uploadRes.Error
-		}
-		// Retrieve each uploaded part and store it to be completed.
-		part := uploadRes.Part
-		if part == nil {
-			return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
-		}
-		// Update the totalUploadedSize.
-		totalUploadedSize += uploadRes.Size
-		// Store the parts to be completed in order.
-		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
-			ETag:       part.ETag,
-			PartNumber: part.PartNumber,
-		})
-	}
-
-	// Verify if we uploaded all the data.
-	if totalUploadedSize != size {
-		return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
-	}
-
-	// Sort all completed parts.
-	sort.Sort(completedParts(complMultipartUpload.Parts))
-	_, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
-	if err != nil {
-		return totalUploadedSize, err
-	}
-
-	// Return final size.
-	return totalUploadedSize, nil
-}
-
-func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketName, objectName string,
-	reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
-	// Input validation.
-	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
-		return 0, err
-	}
-	if err = s3utils.CheckValidObjectName(objectName); err != nil {
-		return 0, err
-	}
-
-	// Calculate the optimal parts info for a given size.
-	totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
-	if err != nil {
-		return 0, err
-	}
-	// Initiate a new multipart upload.
-	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
-	if err != nil {
-		return 0, err
-	}
-
-	// Abort the multipart upload if the function returns
-	// any error; since we do not resume, we should purge
-	// the parts which have been uploaded to relinquish
-	// storage space.
-	defer func() {
-		if err != nil {
-			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
-		}
-	}()
-
-	// Total data read and written to the server; should be equal to 'size' at the end of the call.
-	var totalUploadedSize int64
-
-	// Initialize parts uploaded map.
-	partsInfo := make(map[int]ObjectPart)
-
-	// Part number always starts with '1'.
-	var partNumber int
-	for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
-		// Update progress reader appropriately to the latest offset
-		// as we read from the source.
-		hookReader := newHook(reader, opts.Progress)
-
-		// Proceed to upload the part.
-		if partNumber == totalPartsCount {
-			partSize = lastPartSize
-		}
-		var objPart ObjectPart
-		objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID,
-			io.LimitReader(hookReader, partSize),
-			partNumber, "", "", partSize, opts.UserMetadata)
-		if err != nil {
-			return totalUploadedSize, err
-		}
-
-		// Save successfully uploaded part metadata.
-		partsInfo[partNumber] = objPart
-
-		// Save successfully uploaded size.
-		totalUploadedSize += partSize
-	}
-
-	// Verify if we uploaded all the data.
-	if size > 0 {
-		if totalUploadedSize != size {
-			return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
-		}
-	}
-
-	// Complete multipart upload.
-	var complMultipartUpload completeMultipartUpload
-
-	// Loop over total uploaded parts to save them in
-	// Parts array before completing the multipart request.
-	for i := 1; i < partNumber; i++ {
-		part, ok := partsInfo[i]
-		if !ok {
-			return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
-		}
-		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
-			ETag:       part.ETag,
-			PartNumber: part.PartNumber,
-		})
-	}
-
-	// Sort all completed parts.
-	sort.Sort(completedParts(complMultipartUpload.Parts))
-	_, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
-	if err != nil {
-		return totalUploadedSize, err
-	}
-
-	// Return final size.
-	return totalUploadedSize, nil
-}
-
-// putObjectNoChecksum - special function used for Google Cloud Storage,
-// since Google's multipart API is not S3 compatible.
-func (c Client) putObjectNoChecksum(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return 0, err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return 0, err
-	}
-
-	// Size -1 is only supported on Google Cloud Storage; we error
-	// out in all other situations.
-	if size < 0 && !s3utils.IsGoogleEndpoint(c.endpointURL) {
-		return 0, ErrEntityTooSmall(size, bucketName, objectName)
-	}
-	if size > 0 {
-		if isReadAt(reader) && !isObject(reader) {
-			seeker, _ := reader.(io.Seeker)
-			offset, err := seeker.Seek(0, io.SeekCurrent)
-			if err != nil {
-				return 0, ErrInvalidArgument(err.Error())
-			}
-			reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size)
-		}
-	}
-
-	// Update progress reader appropriately to the latest offset as we
-	// read from the source.
-	readSeeker := newHook(reader, opts.Progress)
-
-	// This function does not calculate sha256 and md5sum for payload.
-	// Execute put object.
-	st, err := c.putObjectDo(ctx, bucketName, objectName, readSeeker, "", "", size, opts)
-	if err != nil {
-		return 0, err
-	}
-	if st.Size != size {
-		return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
-	}
-	return size, nil
-}
-
-// putObjectDo - executes the put object http operation.
-// NOTE: You must have WRITE permissions on a bucket to add an object to it.
-func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (ObjectInfo, error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return ObjectInfo{}, err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return ObjectInfo{}, err
-	}
-	// Set headers.
-	customHeader := opts.Header()
-
-	// Populate request metadata.
-	reqMetadata := requestMetadata{
-		bucketName:       bucketName,
-		objectName:       objectName,
-		customHeader:     customHeader,
-		contentBody:      reader,
-		contentLength:    size,
-		contentMD5Base64: md5Base64,
-		contentSHA256Hex: sha256Hex,
-	}
-
-	// Execute PUT on objectName.
-	resp, err := c.executeMethod(ctx, "PUT", reqMetadata)
-	defer closeResponse(resp)
-	if err != nil {
-		return ObjectInfo{}, err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
-		}
-	}
-
-	var objInfo ObjectInfo
-	// Trim the surrounding double quotes from the ETag.
-	objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
-	objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"")
-	// A success here means data was written to server successfully.
-	objInfo.Size = size
-
-	// Return here.
-	return objInfo, nil
-}

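The parallel uploader above is a plain fan-out/fan-in: part numbers go into one buffered channel, a fixed pool of workers drains it, and results come back on a second channel. A self-contained sketch of that shape, with the upload itself stubbed out:

package main

import (
	"fmt"
	"sync"
)

type partRes struct {
	num  int
	etag string
	err  error
}

func main() {
	const totalParts, workers = 8, 3

	reqs := make(chan int, totalParts)        // part numbers to upload
	results := make(chan partRes, totalParts) // per-part outcomes

	for p := 1; p <= totalParts; p++ {
		reqs <- p
	}
	close(reqs)

	var wg sync.WaitGroup
	for w := 0; w < workers; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for num := range reqs {
				// Stand-in for uploadPart: each worker drains part
				// numbers and reports an outcome.
				results <- partRes{num: num, etag: fmt.Sprintf("etag-%d", num)}
			}
		}()
	}
	go func() { wg.Wait(); close(results) }()

	for res := range results {
		if res.err != nil {
			fmt.Println("part", res.num, "failed:", res.err)
			continue
		}
		fmt.Println("part", res.num, "->", res.etag)
	}
}
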
+ 0 - 258
vendor/github.com/minio/minio-go/api-put-object.go

@@ -1,258 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"io"
-	"net/http"
-	"runtime/debug"
-	"sort"
-
-	"github.com/minio/minio-go/pkg/encrypt"
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// PutObjectOptions represents options specified by the user for a PutObject call.
-type PutObjectOptions struct {
-	UserMetadata       map[string]string
-	Progress           io.Reader
-	ContentType        string
-	ContentEncoding    string
-	ContentDisposition string
-	CacheControl       string
-	EncryptMaterials   encrypt.Materials
-	NumThreads         uint
-	StorageClass       string
-}
-
-// getNumThreads - gets the number of threads to be used in the multipart
-// put object operation
-func (opts PutObjectOptions) getNumThreads() (numThreads int) {
-	if opts.NumThreads > 0 {
-		numThreads = int(opts.NumThreads)
-	} else {
-		numThreads = totalWorkers
-	}
-	return
-}
-
-// Header - constructs the headers from the metadata entered by the user
-// in the PutObjectOptions struct.
-func (opts PutObjectOptions) Header() (header http.Header) {
-	header = make(http.Header)
-
-	if opts.ContentType != "" {
-		header["Content-Type"] = []string{opts.ContentType}
-	} else {
-		header["Content-Type"] = []string{"application/octet-stream"}
-	}
-	if opts.ContentEncoding != "" {
-		header["Content-Encoding"] = []string{opts.ContentEncoding}
-	}
-	if opts.ContentDisposition != "" {
-		header["Content-Disposition"] = []string{opts.ContentDisposition}
-	}
-	if opts.CacheControl != "" {
-		header["Cache-Control"] = []string{opts.CacheControl}
-	}
-	if opts.EncryptMaterials != nil {
-		header[amzHeaderIV] = []string{opts.EncryptMaterials.GetIV()}
-		header[amzHeaderKey] = []string{opts.EncryptMaterials.GetKey()}
-		header[amzHeaderMatDesc] = []string{opts.EncryptMaterials.GetDesc()}
-	}
-	if opts.StorageClass != "" {
-		header[amzStorageClass] = []string{opts.StorageClass}
-	}
-	for k, v := range opts.UserMetadata {
-		if !isAmzHeader(k) && !isStandardHeader(k) && !isSSEHeader(k) && !isStorageClassHeader(k) {
-			header["X-Amz-Meta-"+k] = []string{v}
-		} else {
-			header[k] = []string{v}
-		}
-	}
-	return
-}
-
-// validate() checks if the UserMetadata map has standard headers or client-side
-// encryption headers and returns an error if so.
-func (opts PutObjectOptions) validate() (err error) {
-	for k := range opts.UserMetadata {
-		if isStandardHeader(k) || isCSEHeader(k) || isStorageClassHeader(k) {
-			return ErrInvalidArgument(k + " unsupported request parameter for user defined metadata from minio-go")
-		}
-	}
-	return nil
-}
-
-// completedParts is a collection of parts sortable by their part numbers.
-// Used for sorting the uploaded parts before completing the multipart request.
-type completedParts []CompletePart
-
-func (a completedParts) Len() int           { return len(a) }
-func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
-
-// PutObject creates an object in a bucket.
-//
-// You must have WRITE permissions on a bucket to create an object.
-//
-//  - For size smaller than 64MiB PutObject automatically does a
-//    single atomic Put operation.
-//  - For size larger than 64MiB PutObject automatically does a
-//    multipart Put operation.
-//  - For size input as -1 PutObject does a multipart Put operation
-//    until input stream reaches EOF. Maximum object size that can
-//    be uploaded through this operation will be 5TiB.
-func (c Client) PutObject(bucketName, objectName string, reader io.Reader, objectSize int64,
-	opts PutObjectOptions) (n int64, err error) {
-	return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, objectSize, opts)
-}
-
-func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
-	// Check for largest object size allowed.
-	if size > int64(maxMultipartPutObjectSize) {
-		return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
-	}
-
-	// NOTE: Streaming signature is not supported by GCS.
-	if s3utils.IsGoogleEndpoint(c.endpointURL) {
-		// Do not compute MD5 for Google Cloud Storage.
-		return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
-	}
-
-	if c.overrideSignerType.IsV2() {
-		if size >= 0 && size < minPartSize {
-			return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
-		}
-		return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts)
-	}
-	if size < 0 {
-		return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
-	}
-
-	if size < minPartSize {
-		return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
-	}
-	// For all sizes greater than 64MiB do multipart.
-	return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts)
-}
-
-func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) {
-	// Input validation.
-	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
-		return 0, err
-	}
-	if err = s3utils.CheckValidObjectName(objectName); err != nil {
-		return 0, err
-	}
-
-	// Total data read and written to the server; should be equal to
-	// 'size' at the end of the call.
-	var totalUploadedSize int64
-
-	// Complete multipart upload.
-	var complMultipartUpload completeMultipartUpload
-
-	// Calculate the optimal parts info for a given size.
-	totalPartsCount, partSize, _, err := optimalPartInfo(-1)
-	if err != nil {
-		return 0, err
-	}
-	// Initiate a new multipart upload.
-	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
-	if err != nil {
-		return 0, err
-	}
-
-	defer func() {
-		if err != nil {
-			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
-		}
-	}()
-
-	// Part number always starts with '1'.
-	partNumber := 1
-
-	// Initialize parts uploaded map.
-	partsInfo := make(map[int]ObjectPart)
-
-	// Create a buffer.
-	buf := make([]byte, partSize)
-	defer debug.FreeOSMemory()
-
-	for partNumber <= totalPartsCount {
-		length, rErr := io.ReadFull(reader, buf)
-		if rErr == io.EOF && partNumber > 1 {
-			break
-		}
-		if rErr != nil && rErr != io.ErrUnexpectedEOF {
-			return 0, rErr
-		}
-		// Update progress reader appropriately to the latest offset
-		// as we read from the source.
-		rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
-
-		// Proceed to upload the part.
-		var objPart ObjectPart
-		objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
-			"", "", int64(length), opts.UserMetadata)
-		if err != nil {
-			return totalUploadedSize, err
-		}
-
-		// Save successfully uploaded part metadata.
-		partsInfo[partNumber] = objPart
-
-		// Save successfully uploaded size.
-		totalUploadedSize += int64(length)
-
-		// Increment part number.
-		partNumber++
-
-		// For unknown size, break out once we read an EOF;
-		// we do not have to upload up to totalPartsCount.
-		if rErr == io.EOF {
-			break
-		}
-	}
-
-	// Loop over total uploaded parts to save them in
-	// Parts array before completing the multipart request.
-	for i := 1; i < partNumber; i++ {
-		part, ok := partsInfo[i]
-		if !ok {
-			return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
-		}
-		complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
-			ETag:       part.ETag,
-			PartNumber: part.PartNumber,
-		})
-	}
-
-	// Sort all completed parts.
-	sort.Sort(completedParts(complMultipartUpload.Parts))
-	if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil {
-		return totalUploadedSize, err
-	}
-
-	// Return final size.
-	return totalUploadedSize, nil
-}

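Putting the routing above together from the caller's side: a sketch with placeholder names, where the size of the input decides between a single PUT and a multipart upload:

package main

import (
	"log"
	"os"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Open("video.mp4")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	st, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	// Below 64MiB this is a single atomic PUT; above it, a multipart
	// upload with up to NumThreads parts in flight.
	n, err := c.PutObject("mybucket", "video.mp4", f, st.Size(), minio.PutObjectOptions{
		ContentType: "video/mp4",
		NumThreads:  4,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("uploaded", n, "bytes")
}
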
+ 0 - 290
vendor/github.com/minio/minio-go/api-remove.go

@@ -1,290 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"bytes"
-	"context"
-	"encoding/xml"
-	"io"
-	"net/http"
-	"net/url"
-
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// RemoveBucket deletes the named bucket.
-//
-//  All objects (including all object versions and delete markers)
-//  in the bucket must be deleted before successfully attempting this request.
-func (c Client) RemoveBucket(bucketName string) error {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-	// Execute DELETE on bucket.
-	resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
-		bucketName:       bucketName,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusNoContent {
-			return httpRespToErrorResponse(resp, bucketName, "")
-		}
-	}
-
-	// Remove the location from cache on a successful delete.
-	c.bucketLocCache.Delete(bucketName)
-
-	return nil
-}
-
-// RemoveObject removes an object from a bucket.
-func (c Client) RemoveObject(bucketName, objectName string) error {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return err
-	}
-	// Execute DELETE on objectName.
-	resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
-		bucketName:       bucketName,
-		objectName:       objectName,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-	if resp != nil {
-		// If some unexpected error happened and the retry limit was reached, we want to let the client know.
-		if resp.StatusCode != http.StatusNoContent {
-			return httpRespToErrorResponse(resp, bucketName, objectName)
-		}
-	}
-
-	// DeleteObject always responds with http '204' even for
-	// objects which do not exist, so there is no need to handle
-	// them specifically.
-	return nil
-}
-
-// RemoveObjectError - container for a Multi-Objects Delete S3 API error
-type RemoveObjectError struct {
-	ObjectName string
-	Err        error
-}
-
-// generateRemoveMultiObjectsRequest - generate the XML body for a Multi-Objects Delete request
-func generateRemoveMultiObjectsRequest(objects []string) []byte {
-	rmObjects := []deleteObject{}
-	for _, obj := range objects {
-		rmObjects = append(rmObjects, deleteObject{Key: obj})
-	}
-	xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true})
-	return xmlBytes
-}
-
-// processRemoveMultiObjectsResponse - parse the Multi-Objects Delete web service
-// response and send the failure status of each undeleted object on errorCh
-func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) {
-	// Parse multi delete XML response
-	rmResult := &deleteMultiObjectsResult{}
-	err := xmlDecoder(body, rmResult)
-	if err != nil {
-		errorCh <- RemoveObjectError{ObjectName: "", Err: err}
-		return
-	}
-
-	// Report deletions that returned an error.
-	for _, obj := range rmResult.UnDeletedObjects {
-		errorCh <- RemoveObjectError{
-			ObjectName: obj.Key,
-			Err: ErrorResponse{
-				Code:    obj.Code,
-				Message: obj.Message,
-			},
-		}
-	}
-}
-
-// RemoveObjects removes multiple objects from a bucket.
-// The list of objects to remove is received from objectsCh.
-// Remove failures are sent back via the returned error channel.
-func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
-	errorCh := make(chan RemoveObjectError, 1)
-
-	// Validate if bucket name is valid.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		defer close(errorCh)
-		errorCh <- RemoveObjectError{
-			Err: err,
-		}
-		return errorCh
-	}
-	// Validate objects channel to be properly allocated.
-	if objectsCh == nil {
-		defer close(errorCh)
-		errorCh <- RemoveObjectError{
-			Err: ErrInvalidArgument("Objects channel cannot be nil"),
-		}
-		return errorCh
-	}
-
-	// Generate and call MultiDelete S3 requests based on entries received from objectsCh
-	go func(errorCh chan<- RemoveObjectError) {
-		maxEntries := 1000
-		finish := false
-		urlValues := make(url.Values)
-		urlValues.Set("delete", "")
-
-		// Close error channel when Multi delete finishes.
-		defer close(errorCh)
-
-		// Loop over entries by 1000 and call MultiDelete requests
-		for {
-			if finish {
-				break
-			}
-			count := 0
-			var batch []string
-
-			// Try to gather 1000 entries
-			for object := range objectsCh {
-				batch = append(batch, object)
-				if count++; count >= maxEntries {
-					break
-				}
-			}
-			if count == 0 {
-				// Multi Objects Delete API doesn't accept empty object list, quit immediately
-				break
-			}
-			if count < maxEntries {
-				// We didn't have 1000 entries, so this is the last batch
-				finish = true
-			}
-
-			// Generate remove multi objects XML request
-			removeBytes := generateRemoveMultiObjectsRequest(batch)
-			// Execute POST on bucket to delete objects.
-			resp, err := c.executeMethod(context.Background(), "POST", requestMetadata{
-				bucketName:       bucketName,
-				queryValues:      urlValues,
-				contentBody:      bytes.NewReader(removeBytes),
-				contentLength:    int64(len(removeBytes)),
-				contentMD5Base64: sumMD5Base64(removeBytes),
-				contentSHA256Hex: sum256Hex(removeBytes),
-			})
-			if err != nil {
-				for _, b := range batch {
-					errorCh <- RemoveObjectError{ObjectName: b, Err: err}
-				}
-				continue
-			}
-
-			// Process multiobjects remove xml response
-			processRemoveMultiObjectsResponse(resp.Body, batch, errorCh)
-
-			closeResponse(resp)
-		}
-	}(errorCh)
-	return errorCh
-}
-
-// RemoveIncompleteUpload aborts a partially uploaded object.
-func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return err
-	}
-	// Find multipart upload id of the object to be aborted.
-	uploadID, err := c.findUploadID(bucketName, objectName)
-	if err != nil {
-		return err
-	}
-	if uploadID != "" {
-		// Upload id found, abort the incomplete multipart upload.
-		err := c.abortMultipartUpload(context.Background(), bucketName, objectName, uploadID)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// abortMultipartUpload aborts a multipart upload for the given
-// uploadID; all previously uploaded parts are deleted.
-func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return err
-	}
-
-	// Initialize url queries.
-	urlValues := make(url.Values)
-	urlValues.Set("uploadId", uploadID)
-
-	// Execute DELETE on multipart upload.
-	resp, err := c.executeMethod(ctx, "DELETE", requestMetadata{
-		bucketName:       bucketName,
-		objectName:       objectName,
-		queryValues:      urlValues,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusNoContent {
-			// Abort has no response body, handle it for any errors.
-			var errorResponse ErrorResponse
-			switch resp.StatusCode {
-			case http.StatusNotFound:
-				// This is needed specifically for abort and cannot
-				// be folded into the default case.
-				errorResponse = ErrorResponse{
-					Code:       "NoSuchUpload",
-					Message:    "The specified multipart upload does not exist.",
-					BucketName: bucketName,
-					Key:        objectName,
-					RequestID:  resp.Header.Get("x-amz-request-id"),
-					HostID:     resp.Header.Get("x-amz-id-2"),
-					Region:     resp.Header.Get("x-amz-bucket-region"),
-				}
-			default:
-				return httpRespToErrorResponse(resp, bucketName, objectName)
-			}
-			return errorResponse
-		}
-	}
-	return nil
-}
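
The RemoveObjects API deleted above batches keys from a caller-supplied channel into Multi-Objects Delete requests of at most 1000 entries each. A short usage sketch against the signatures in this file, reusing a client constructed as in the earlier PutObject sketch:

// Feed keys into the channel; RemoveObjects consumes it in batches of up to 1000.
objectsCh := make(chan string)
go func() {
	defer close(objectsCh)
	for _, key := range []string{"logs/2018-01.txt", "logs/2018-02.txt"} {
		objectsCh <- key
	}
}()

// Drain the error channel; it is closed once all batches have been processed.
for rErr := range client.RemoveObjects("mybucket", objectsCh) {
	log.Printf("failed to remove %s: %v", rErr.ObjectName, rErr.Err)
}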

+ 0 - 245
vendor/github.com/minio/minio-go/api-s3-datatypes.go

@@ -1,245 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"encoding/xml"
-	"time"
-)
-
-// listAllMyBucketsResult container for listBuckets response.
-type listAllMyBucketsResult struct {
-	// Container for one or more buckets.
-	Buckets struct {
-		Bucket []BucketInfo
-	}
-	Owner owner
-}
-
-// owner container for bucket owner information.
-type owner struct {
-	DisplayName string
-	ID          string
-}
-
-// CommonPrefix container for prefix response.
-type CommonPrefix struct {
-	Prefix string
-}
-
-// ListBucketV2Result container for listObjects response version 2.
-type ListBucketV2Result struct {
-	// A response can contain CommonPrefixes only if you have
-	// specified a delimiter.
-	CommonPrefixes []CommonPrefix
-	// Metadata about each object returned.
-	Contents  []ObjectInfo
-	Delimiter string
-
-	// Encoding type used to encode object keys in the response.
-	EncodingType string
-
-	// A flag that indicates whether or not ListObjects returned all of the results
-	// that satisfied the search criteria.
-	IsTruncated bool
-	MaxKeys     int64
-	Name        string
-
-	// Hold the token that will be sent in the next request to fetch the next group of keys
-	NextContinuationToken string
-
-	ContinuationToken string
-	Prefix            string
-
-	// FetchOwner and StartAfter are currently not used
-	FetchOwner string
-	StartAfter string
-}
-
-// ListBucketResult container for listObjects response.
-type ListBucketResult struct {
-	// A response can contain CommonPrefixes only if you have
-	// specified a delimiter.
-	CommonPrefixes []CommonPrefix
-	// Metadata about each object returned.
-	Contents  []ObjectInfo
-	Delimiter string
-
-	// Encoding type used to encode object keys in the response.
-	EncodingType string
-
-	// A flag that indicates whether or not ListObjects returned all of the results
-	// that satisfied the search criteria.
-	IsTruncated bool
-	Marker      string
-	MaxKeys     int64
-	Name        string
-
-	// When response is truncated (the IsTruncated element value in
-	// the response is true), you can use the key name in this field
-	// as a marker in the subsequent request to get the next set of objects.
-	// Object storage lists objects in alphabetical order. Note: this
-	// element is returned only if you have the delimiter request
-	// parameter specified. If the response does not include NextMarker
-	// and it is truncated, you can use the value of the last Key in
-	// the response as the marker in the subsequent request to get the
-	// next set of object keys.
-	NextMarker string
-	Prefix     string
-}
-
-// ListMultipartUploadsResult container for ListMultipartUploads response
-type ListMultipartUploadsResult struct {
-	Bucket             string
-	KeyMarker          string
-	UploadIDMarker     string `xml:"UploadIdMarker"`
-	NextKeyMarker      string
-	NextUploadIDMarker string `xml:"NextUploadIdMarker"`
-	EncodingType       string
-	MaxUploads         int64
-	IsTruncated        bool
-	Uploads            []ObjectMultipartInfo `xml:"Upload"`
-	Prefix             string
-	Delimiter          string
-	// A response can contain CommonPrefixes only if you specify a delimiter.
-	CommonPrefixes []CommonPrefix
-}
-
-// initiator container for who initiated multipart upload.
-type initiator struct {
-	ID          string
-	DisplayName string
-}
-
-// copyObjectResult container for copy object response.
-type copyObjectResult struct {
-	ETag         string
-	LastModified time.Time // time string format "2006-01-02T15:04:05.000Z"
-}
-
-// ObjectPart container for particular part of an object.
-type ObjectPart struct {
-	// Part number identifies the part.
-	PartNumber int
-
-	// Date and time the part was uploaded.
-	LastModified time.Time
-
-	// Entity tag returned when the part was uploaded, usually md5sum
-	// of the part.
-	ETag string
-
-	// Size of the uploaded part data.
-	Size int64
-}
-
-// ListObjectPartsResult container for ListObjectParts response.
-type ListObjectPartsResult struct {
-	Bucket   string
-	Key      string
-	UploadID string `xml:"UploadId"`
-
-	Initiator initiator
-	Owner     owner
-
-	StorageClass         string
-	PartNumberMarker     int
-	NextPartNumberMarker int
-	MaxParts             int
-
-	// Indicates whether the returned list of parts is truncated.
-	IsTruncated bool
-	ObjectParts []ObjectPart `xml:"Part"`
-
-	EncodingType string
-}
-
-// initiateMultipartUploadResult container for InitiateMultiPartUpload
-// response.
-type initiateMultipartUploadResult struct {
-	Bucket   string
-	Key      string
-	UploadID string `xml:"UploadId"`
-}
-
-// completeMultipartUploadResult container for completed multipart
-// upload response.
-type completeMultipartUploadResult struct {
-	Location string
-	Bucket   string
-	Key      string
-	ETag     string
-}
-
-// CompletePart sub-container that lists individual part numbers and their
-// md5sums; part of completeMultipartUpload.
-type CompletePart struct {
-	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
-
-	// Part number identifies the part.
-	PartNumber int
-	ETag       string
-}
-
-// completeMultipartUpload container for completing multipart upload.
-type completeMultipartUpload struct {
-	XMLName xml.Name       `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
-	Parts   []CompletePart `xml:"Part"`
-}
-
-// createBucketConfiguration container for bucket configuration.
-type createBucketConfiguration struct {
-	XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
-	Location string   `xml:"LocationConstraint"`
-}
-
-// deleteObject container for Delete element in MultiObjects Delete XML request
-type deleteObject struct {
-	Key       string
-	VersionID string `xml:"VersionId,omitempty"`
-}
-
-// deletedObject container for Deleted element in MultiObjects Delete XML response
-type deletedObject struct {
-	Key       string
-	VersionID string `xml:"VersionId,omitempty"`
-	// These fields are ignored.
-	DeleteMarker          bool
-	DeleteMarkerVersionID string
-}
-
-// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
-type nonDeletedObject struct {
-	Key     string
-	Code    string
-	Message string
-}
-
-// deleteMultiObjects container for MultiObjects Delete XML request
-type deleteMultiObjects struct {
-	XMLName xml.Name `xml:"Delete"`
-	Quiet   bool
-	Objects []deleteObject `xml:"Object"`
-}
-
-// deleteMultiObjectsResult container for MultiObjects Delete XML response
-type deleteMultiObjectsResult struct {
-	XMLName          xml.Name           `xml:"DeleteResult"`
-	DeletedObjects   []deletedObject    `xml:"Deleted"`
-	UnDeletedObjects []nonDeletedObject `xml:"Error"`
-}
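
The delete request/response types above are unexported, so for illustration the self-contained sketch below uses local mirrors of those structs (field tags copied from this file) to show the XML body that generateRemoveMultiObjectsRequest produces:

package main

import (
	"encoding/xml"
	"fmt"
)

// Local mirrors of the unexported vendored types, for illustration only.
type deleteObject struct {
	Key       string
	VersionID string `xml:"VersionId,omitempty"`
}

type deleteMultiObjects struct {
	XMLName xml.Name       `xml:"Delete"`
	Quiet   bool
	Objects []deleteObject `xml:"Object"`
}

func main() {
	body := deleteMultiObjects{
		Quiet:   true,
		Objects: []deleteObject{{Key: "a.txt"}, {Key: "b.txt"}},
	}
	out, err := xml.Marshal(body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// Output:
	// <Delete><Quiet>true</Quiet><Object><Key>a.txt</Key></Object><Object><Key>b.txt</Key></Object></Delete>
}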

+ 0 - 178
vendor/github.com/minio/minio-go/api-stat.go

@@ -1,178 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"context"
-	"net/http"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// BucketExists verifies whether the bucket exists and you have permission to access it.
-func (c Client) BucketExists(bucketName string) (bool, error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return false, err
-	}
-
-	// Execute HEAD on bucketName.
-	resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{
-		bucketName:       bucketName,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		if ToErrorResponse(err).Code == "NoSuchBucket" {
-			return false, nil
-		}
-		return false, err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return false, httpRespToErrorResponse(resp, bucketName, "")
-		}
-	}
-	return true, nil
-}
-
-// List of header keys to be filtered, usually
-// from all S3 API http responses.
-var defaultFilterKeys = []string{
-	"Connection",
-	"Transfer-Encoding",
-	"Accept-Ranges",
-	"Date",
-	"Server",
-	"Vary",
-	"x-amz-bucket-region",
-	"x-amz-request-id",
-	"x-amz-id-2",
-	// Add new headers to be ignored.
-}
-
-// Extract only necessary metadata header key/values by
-// filtering them out with a list of custom header keys.
-func extractObjMetadata(header http.Header) http.Header {
-	filterKeys := append([]string{
-		"ETag",
-		"Content-Length",
-		"Last-Modified",
-		"Content-Type",
-	}, defaultFilterKeys...)
-	return filterHeader(header, filterKeys)
-}
-
-// StatObject verifies whether the object exists and you have permission to access it.
-func (c Client) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return ObjectInfo{}, err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return ObjectInfo{}, err
-	}
-	return c.statObject(context.Background(), bucketName, objectName, opts)
-}
-
-// Lower level API for statObject supporting pre-conditions and range headers.
-func (c Client) statObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return ObjectInfo{}, err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return ObjectInfo{}, err
-	}
-
-	// Execute HEAD on objectName.
-	resp, err := c.executeMethod(ctx, "HEAD", requestMetadata{
-		bucketName:       bucketName,
-		objectName:       objectName,
-		contentSHA256Hex: emptySHA256Hex,
-		customHeader:     opts.Header(),
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return ObjectInfo{}, err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
-		}
-	}
-
-	// Trim off the surrounding double quotes from the ETag.
-	md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
-	md5sum = strings.TrimSuffix(md5sum, "\"")
-
-	// Parse Content-Length if it exists.
-	var size int64 = -1
-	contentLengthStr := resp.Header.Get("Content-Length")
-	if contentLengthStr != "" {
-		size, err = strconv.ParseInt(contentLengthStr, 10, 64)
-		if err != nil {
-			// Content-Length is not valid
-			return ObjectInfo{}, ErrorResponse{
-				Code:       "InternalError",
-				Message:    "Content-Length is invalid. " + reportIssue,
-				BucketName: bucketName,
-				Key:        objectName,
-				RequestID:  resp.Header.Get("x-amz-request-id"),
-				HostID:     resp.Header.Get("x-amz-id-2"),
-				Region:     resp.Header.Get("x-amz-bucket-region"),
-			}
-		}
-	}
-
-	// Parse Last-Modified, which has HTTP time format.
-	date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
-	if err != nil {
-		return ObjectInfo{}, ErrorResponse{
-			Code:       "InternalError",
-			Message:    "Last-Modified time format is invalid. " + reportIssue,
-			BucketName: bucketName,
-			Key:        objectName,
-			RequestID:  resp.Header.Get("x-amz-request-id"),
-			HostID:     resp.Header.Get("x-amz-id-2"),
-			Region:     resp.Header.Get("x-amz-bucket-region"),
-		}
-	}
-
-	// Fetch content type if any present.
-	contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
-	if contentType == "" {
-		contentType = "application/octet-stream"
-	}
-
-	// Save object metadata info.
-	return ObjectInfo{
-		ETag:         md5sum,
-		Key:          objectName,
-		Size:         size,
-		LastModified: date,
-		ContentType:  contentType,
-		// Extract only the relevant header keys describing the object.
-		// The following function filters out a standard set of keys
-		// which are not part of object metadata.
-		Metadata: extractObjMetadata(resp.Header),
-	}, nil
-}
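
StatObject above issues a HEAD request and assembles an ObjectInfo from the response headers. A minimal usage fragment, reusing a client constructed as in the earlier PutObject sketch:

info, err := client.StatObject("mybucket", "backup.tar.gz", minio.StatObjectOptions{})
if err != nil {
	log.Fatal(err)
}
// Size, ETag, ContentType and LastModified are parsed from the HEAD
// response headers exactly as shown in the file above.
log.Printf("%s: %d bytes, etag %s, type %s, modified %s",
	info.Key, info.Size, info.ETag, info.ContentType, info.LastModified)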

+ 0 - 832
vendor/github.com/minio/minio-go/api.go

@@ -1,832 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"bytes"
-	"context"
-	"crypto/md5"
-	"crypto/sha256"
-	"errors"
-	"fmt"
-	"hash"
-	"io"
-	"io/ioutil"
-	"math/rand"
-	"net"
-	"net/http"
-	"net/http/httputil"
-	"net/url"
-	"os"
-	"runtime"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/minio/minio-go/pkg/credentials"
-	"github.com/minio/minio-go/pkg/s3signer"
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// Client implements Amazon S3 compatible methods.
-type Client struct {
-	//  Standard options.
-
-	// Parsed endpoint url provided by the user.
-	endpointURL url.URL
-
-	// Holds various credential providers.
-	credsProvider *credentials.Credentials
-
-	// Custom signerType value overrides all credentials.
-	overrideSignerType credentials.SignatureType
-
-	// User supplied.
-	appInfo struct {
-		appName    string
-		appVersion string
-	}
-
-	// Indicate whether we are using https or not
-	secure bool
-
-	// Needs allocation.
-	httpClient     *http.Client
-	bucketLocCache *bucketLocationCache
-
-	// Advanced functionality.
-	isTraceEnabled bool
-	traceOutput    io.Writer
-
-	// S3 specific accelerated endpoint.
-	s3AccelerateEndpoint string
-
-	// Region endpoint
-	region string
-
-	// Random seed.
-	random *rand.Rand
-}
-
-// Global constants.
-const (
-	libraryName    = "minio-go"
-	libraryVersion = "4.0.6"
-)
-
-// User Agent should always follow the below style.
-// Please open an issue to discuss any new changes here.
-//
-//       Minio (OS; ARCH) LIB/VER APP/VER
-const (
-	libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") "
-	libraryUserAgent       = libraryUserAgentPrefix + libraryName + "/" + libraryVersion
-)
-
-// NewV2 - instantiate minio client with Amazon S3 signature version
-// '2' compatibility.
-func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
-	creds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "")
-	clnt, err := privateNew(endpoint, creds, secure, "")
-	if err != nil {
-		return nil, err
-	}
-	clnt.overrideSignerType = credentials.SignatureV2
-	return clnt, nil
-}
-
-// NewV4 - instantiate minio client with Amazon S3 signature version
-// '4' compatibility.
-func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
-	creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
-	clnt, err := privateNew(endpoint, creds, secure, "")
-	if err != nil {
-		return nil, err
-	}
-	clnt.overrideSignerType = credentials.SignatureV4
-	return clnt, nil
-}
-
-// New - instantiates a minio client and adds automatic verification of the signature.
-func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
-	creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
-	clnt, err := privateNew(endpoint, creds, secure, "")
-	if err != nil {
-		return nil, err
-	}
-	// Google cloud storage should be set to signature V2, force it if not.
-	if s3utils.IsGoogleEndpoint(clnt.endpointURL) {
-		clnt.overrideSignerType = credentials.SignatureV2
-	}
-	// If Amazon S3 set to signature v4.
-	if s3utils.IsAmazonEndpoint(clnt.endpointURL) {
-		clnt.overrideSignerType = credentials.SignatureV4
-	}
-	return clnt, nil
-}
-
-// NewWithCredentials - instantiate minio client with credentials provider
-// for retrieving credentials from various credentials provider such as
-// IAM, File, Env etc.
-func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
-	return privateNew(endpoint, creds, secure, region)
-}
-
-// NewWithRegion - instantiate minio client, with region configured. Unlike New(),
-// NewWithRegion avoids bucket-location lookup operations and is slightly faster.
-// Use this function when your application deals with a single region.
-func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) {
-	creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
-	return privateNew(endpoint, creds, secure, region)
-}
-
-// lockedRandSource provides protected rand source, implements rand.Source interface.
-type lockedRandSource struct {
-	lk  sync.Mutex
-	src rand.Source
-}
-
-// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
-func (r *lockedRandSource) Int63() (n int64) {
-	r.lk.Lock()
-	n = r.src.Int63()
-	r.lk.Unlock()
-	return
-}
-
-// Seed uses the provided seed value to initialize the generator to a
-// deterministic state.
-func (r *lockedRandSource) Seed(seed int64) {
-	r.lk.Lock()
-	r.src.Seed(seed)
-	r.lk.Unlock()
-}
-
-// getRegionFromURL - parse region from URL if present.
-func getRegionFromURL(u url.URL) (region string) {
-	region = ""
-	if s3utils.IsGoogleEndpoint(u) {
-		return
-	} else if s3utils.IsAmazonChinaEndpoint(u) {
-		// For China specifically we need to set everything to
-		// cn-north-1 for now; there is no easier way until AWS S3
-		// provides a cleaner compatible API across "us-east-1" and
-		// the China region.
-		return "cn-north-1"
-	} else if s3utils.IsAmazonGovCloudEndpoint(u) {
-		// For us-gov specifically we need to set everything to
-		// us-gov-west-1 for now; there is no easier way until AWS S3
-		// provides a cleaner compatible API across "us-east-1" and
-		// the Gov cloud region.
-		return "us-gov-west-1"
-	}
-	parts := s3utils.AmazonS3Host.FindStringSubmatch(u.Host)
-	if len(parts) > 1 {
-		region = parts[1]
-	}
-	return region
-}
-
-func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
-	// construct endpoint.
-	endpointURL, err := getEndpointURL(endpoint, secure)
-	if err != nil {
-		return nil, err
-	}
-
-	// instantiate new Client.
-	clnt := new(Client)
-
-	// Save the credentials.
-	clnt.credsProvider = creds
-
-	// Remember whether we are using https or not
-	clnt.secure = secure
-
-	// Save endpoint URL, user agent for future uses.
-	clnt.endpointURL = *endpointURL
-
-	// Instantiate http client and bucket location cache.
-	clnt.httpClient = &http.Client{
-		Transport: defaultMinioTransport,
-	}
-
-	// Sets custom region; if region is empty the bucket location cache is used automatically.
-	if region == "" {
-		region = getRegionFromURL(clnt.endpointURL)
-	}
-	clnt.region = region
-
-	// Instantiate bucket location cache.
-	clnt.bucketLocCache = newBucketLocationCache()
-
-	// Introduce a new locked random seed.
-	clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())})
-
-	// Return.
-	return clnt, nil
-}
-
-// SetAppInfo - add application details to user agent.
-func (c *Client) SetAppInfo(appName string, appVersion string) {
-	// If app name and version are not set, we do not set a new user agent.
-	if appName != "" && appVersion != "" {
-		c.appInfo = struct {
-			appName    string
-			appVersion string
-		}{}
-		c.appInfo.appName = appName
-		c.appInfo.appVersion = appVersion
-	}
-}
-
-// SetCustomTransport - set new custom transport.
-func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
-	// Set this to override default transport
-	// ``http.DefaultTransport``.
-	//
-	// This transport is usually needed for debugging OR to add your
-	// own custom TLS certificates on the client transport, for custom
-	// CA's and certs which are not part of standard certificate
-	// authority follow this example :-
-	//
-	//   tr := &http.Transport{
-	//           TLSClientConfig:    &tls.Config{RootCAs: pool},
-	//           DisableCompression: true,
-	//   }
-	//   api.SetTransport(tr)
-	//
-	if c.httpClient != nil {
-		c.httpClient.Transport = customHTTPTransport
-	}
-}
-
-// TraceOn - enable HTTP tracing.
-func (c *Client) TraceOn(outputStream io.Writer) {
-	// if outputStream is nil then default to os.Stdout.
-	if outputStream == nil {
-		outputStream = os.Stdout
-	}
-	// Sets a new output stream.
-	c.traceOutput = outputStream
-
-	// Enable tracing.
-	c.isTraceEnabled = true
-}
-
-// TraceOff - disable HTTP tracing.
-func (c *Client) TraceOff() {
-	// Disable tracing.
-	c.isTraceEnabled = false
-}
-
-// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your
-// requests. This feature is specific to S3; for all other endpoints this
-// function does nothing. To read further details on s3 transfer acceleration
-// please visit -
-// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
-func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
-	if s3utils.IsAmazonEndpoint(c.endpointURL) {
-		c.s3AccelerateEndpoint = accelerateEndpoint
-	}
-}
-
-// hashMaterials provides relevant initialized hash algo writers
-// based on the expected signature type.
-//
-//  - For a signature v4 request, if the connection is insecure, compute only sha256.
-//  - For a signature v4 request, if the connection is secure, compute only md5.
-//  - For an anonymous request, compute md5.
-func (c *Client) hashMaterials() (hashAlgos map[string]hash.Hash, hashSums map[string][]byte) {
-	hashSums = make(map[string][]byte)
-	hashAlgos = make(map[string]hash.Hash)
-	if c.overrideSignerType.IsV4() {
-		if c.secure {
-			hashAlgos["md5"] = md5.New()
-		} else {
-			hashAlgos["sha256"] = sha256.New()
-		}
-	} else {
-		if c.overrideSignerType.IsAnonymous() {
-			hashAlgos["md5"] = md5.New()
-		}
-	}
-	return hashAlgos, hashSums
-}
-
-// requestMetadata - container for all the values needed to make a request.
-type requestMetadata struct {
-	// If set newRequest presigns the URL.
-	presignURL bool
-
-	// User supplied.
-	bucketName   string
-	objectName   string
-	queryValues  url.Values
-	customHeader http.Header
-	expires      int64
-
-	// Generated by our internal code.
-	bucketLocation   string
-	contentBody      io.Reader
-	contentLength    int64
-	contentMD5Base64 string // carries base64 encoded md5sum
-	contentSHA256Hex string // carries hex encoded sha256sum
-}
-
-// dumpHTTP - dump HTTP request and response.
-func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
-	// Starts http dump.
-	_, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
-	if err != nil {
-		return err
-	}
-
-	// Filter out Signature field from Authorization header.
-	origAuth := req.Header.Get("Authorization")
-	if origAuth != "" {
-		req.Header.Set("Authorization", redactSignature(origAuth))
-	}
-
-	// Only display request header.
-	reqTrace, err := httputil.DumpRequestOut(req, false)
-	if err != nil {
-		return err
-	}
-
-	// Write request to trace output.
-	_, err = fmt.Fprint(c.traceOutput, string(reqTrace))
-	if err != nil {
-		return err
-	}
-
-	// Only display response header.
-	var respTrace []byte
-
-	// For errors we make sure to dump response body as well.
-	if resp.StatusCode != http.StatusOK &&
-		resp.StatusCode != http.StatusPartialContent &&
-		resp.StatusCode != http.StatusNoContent {
-		respTrace, err = httputil.DumpResponse(resp, true)
-		if err != nil {
-			return err
-		}
-	} else {
-		// WORKAROUND for https://github.com/golang/go/issues/13942.
-		// httputil.DumpResponse does not print response headers for
-		// all successful calls which have response ContentLength set
-		// to zero. Keep this workaround until the above bug is fixed.
-		if resp.ContentLength == 0 {
-			var buffer bytes.Buffer
-			if err = resp.Header.Write(&buffer); err != nil {
-				return err
-			}
-			respTrace = buffer.Bytes()
-			respTrace = append(respTrace, []byte("\r\n")...)
-		} else {
-			respTrace, err = httputil.DumpResponse(resp, false)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	// Write response to trace output.
-	_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
-	if err != nil {
-		return err
-	}
-
-	// Ends the http dump.
-	_, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
-	if err != nil {
-		return err
-	}
-
-	// Returns success.
-	return nil
-}
-
-// do - execute http request.
-func (c Client) do(req *http.Request) (*http.Response, error) {
-	var resp *http.Response
-	var err error
-	// Do the request in a loop in case an http 307 is met, since golang still
-	// doesn't handle this situation properly (https://github.com/golang/go/issues/7912)
-	for {
-		resp, err = c.httpClient.Do(req)
-		if err != nil {
-			// Handle this specifically for now until future Golang
-			// versions fix this issue properly.
-			urlErr, ok := err.(*url.Error)
-			if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
-				return nil, &url.Error{
-					Op:  urlErr.Op,
-					URL: urlErr.URL,
-					Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
-				}
-			}
-			return nil, err
-		}
-		// Redo the request with the new redirect url if http 307 is returned, quit the loop otherwise
-		if resp != nil && resp.StatusCode == http.StatusTemporaryRedirect {
-			newURL, err := url.Parse(resp.Header.Get("Location"))
-			if err != nil {
-				break
-			}
-			req.URL = newURL
-		} else {
-			break
-		}
-	}
-
-	// Response cannot be nil; report if that is the case.
-	if resp == nil {
-		msg := "Response is empty. " + reportIssue
-		return nil, ErrInvalidArgument(msg)
-	}
-
-	// If trace is enabled, dump http request and response.
-	if c.isTraceEnabled {
-		err = c.dumpHTTP(req, resp)
-		if err != nil {
-			return nil, err
-		}
-	}
-	return resp, nil
-}
-
-// List of success statuses.
-var successStatus = []int{
-	http.StatusOK,
-	http.StatusNoContent,
-	http.StatusPartialContent,
-}
-
-// executeMethod - instantiates a given method, and retries the
-// request upon any error, up to maxRetries attempts, in a binomially
-// delayed manner using a standard backoff algorithm.
-func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) {
-	var isRetryable bool     // Indicates if request can be retried.
-	var bodySeeker io.Seeker // Extracted seeker from io.Reader.
-	var reqRetry = MaxRetry  // Indicates how many times we can retry the request
-
-	if metadata.contentBody != nil {
-		// If the body is seekable then the request is retryable.
-		bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
-		switch bodySeeker {
-		case os.Stdin, os.Stdout, os.Stderr:
-			isRetryable = false
-		}
-		// Retry only when reader is seekable
-		if !isRetryable {
-			reqRetry = 1
-		}
-
-		// Figure out if the body can be closed - if yes
-		// we will definitely close it upon the function
-		// return.
-		bodyCloser, ok := metadata.contentBody.(io.Closer)
-		if ok {
-			defer bodyCloser.Close()
-		}
-	}
-
-	// Create a done channel to control 'newRetryTimer' go routine.
-	doneCh := make(chan struct{}, 1)
-
-	// Indicate to our routine to exit cleanly upon return.
-	defer close(doneCh)
-
-	// Blank identifier is kept here on purpose since 'range' without
-	// blank identifiers is only supported since go1.4
-	// https://golang.org/doc/go1.4#forrange.
-	for range c.newRetryTimer(reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
-		// Retry executes the following function body if the request has an
-		// error until maxRetries have been exhausted; retry attempts are
-		// performed after waiting for a given period of time in a
-		// binomial fashion.
-		if isRetryable {
-			// Seek back to beginning for each attempt.
-			if _, err = bodySeeker.Seek(0, 0); err != nil {
-				// If seek failed, no need to retry.
-				return nil, err
-			}
-		}
-
-		// Instantiate a new request.
-		var req *http.Request
-		req, err = c.newRequest(method, metadata)
-		if err != nil {
-			errResponse := ToErrorResponse(err)
-			if isS3CodeRetryable(errResponse.Code) {
-				continue // Retry.
-			}
-			return nil, err
-		}
-		// Add context to request
-		req = req.WithContext(ctx)
-
-		// Initiate the request.
-		res, err = c.do(req)
-		if err != nil {
-			// For supported network errors, verify whether they are retryable.
-			if isNetErrorRetryable(err) {
-				continue // Retry.
-			}
-			// For other errors, return here; no need to retry.
-			return nil, err
-		}
-
-		// For any known successful http status, return quickly.
-		for _, httpStatus := range successStatus {
-			if httpStatus == res.StatusCode {
-				return res, nil
-			}
-		}
-
-		// Read the body to be saved later.
-		errBodyBytes, err := ioutil.ReadAll(res.Body)
-		// res.Body should be closed
-		closeResponse(res)
-		if err != nil {
-			return nil, err
-		}
-
-		// Save the body.
-		errBodySeeker := bytes.NewReader(errBodyBytes)
-		res.Body = ioutil.NopCloser(errBodySeeker)
-
-		// For errors, verify whether the error is retryable; otherwise fail quickly.
-		errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
-
-		// Save the body back again.
-		errBodySeeker.Seek(0, 0) // Seek back to starting point.
-		res.Body = ioutil.NopCloser(errBodySeeker)
-
-		// If the bucket region is set in the error response and the error
-		// code dictates an invalid region, we can retry the request
-		// with the new region.
-		//
-		// Additionally we should only retry if bucketLocation and custom
-		// region are empty.
-		if metadata.bucketLocation == "" && c.region == "" {
-			if errResponse.Code == "AuthorizationHeaderMalformed" || errResponse.Code == "InvalidRegion" {
-				if metadata.bucketName != "" && errResponse.Region != "" {
-					// Gather Cached location only if bucketName is present.
-					if _, cachedLocationError := c.bucketLocCache.Get(metadata.bucketName); cachedLocationError != false {
-						c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
-						continue // Retry.
-					}
-				}
-			}
-		}
-
-		// Verify if error response code is retryable.
-		if isS3CodeRetryable(errResponse.Code) {
-			continue // Retry.
-		}
-
-		// Verify if http status code is retryable.
-		if isHTTPStatusRetryable(res.StatusCode) {
-			continue // Retry.
-		}
-
-		// For all other cases break out of the retry loop.
-		break
-	}
-	return res, err
-}
-
-// newRequest - instantiate a new HTTP request for a given method.
-func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) {
-	// If no method is supplied default to 'POST'.
-	if method == "" {
-		method = "POST"
-	}
-
-	location := metadata.bucketLocation
-	if location == "" {
-		if metadata.bucketName != "" {
-			// Gather location only if bucketName is present.
-			location, err = c.getBucketLocation(metadata.bucketName)
-			if err != nil {
-				if ToErrorResponse(err).Code != "AccessDenied" {
-					return nil, err
-				}
-			}
-			// Upon AccessDenied error on fetching bucket location, default
-			// to possible locations based on endpoint URL. This can usually
-			// happen when GetBucketLocation() is disabled using IAM policies.
-		}
-		if location == "" {
-			location = getDefaultLocation(c.endpointURL, c.region)
-		}
-	}
-
-	// Construct a new target URL.
-	targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, metadata.queryValues)
-	if err != nil {
-		return nil, err
-	}
-
-	// Initialize a new HTTP request for the method.
-	req, err = http.NewRequest(method, targetURL.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-
-	// Get credentials from the configured credentials provider.
-	value, err := c.credsProvider.Get()
-	if err != nil {
-		return nil, err
-	}
-
-	var (
-		signerType      = value.SignerType
-		accessKeyID     = value.AccessKeyID
-		secretAccessKey = value.SecretAccessKey
-		sessionToken    = value.SessionToken
-	)
-
-	// Custom signer set then override the behavior.
-	if c.overrideSignerType != credentials.SignatureDefault {
-		signerType = c.overrideSignerType
-	}
-
-	// If signerType returned by credentials helper is anonymous,
-	// then do not sign regardless of signerType override.
-	if value.SignerType == credentials.SignatureAnonymous {
-		signerType = credentials.SignatureAnonymous
-	}
-
-	// Generate presign url if needed, return right here.
-	if metadata.expires != 0 && metadata.presignURL {
-		if signerType.IsAnonymous() {
-			return nil, ErrInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.")
-		}
-		if signerType.IsV2() {
-			// Presign URL with signature v2.
-			req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires)
-		} else if signerType.IsV4() {
-			// Presign URL with signature v4.
-			req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires)
-		}
-		return req, nil
-	}
-
-	// Set 'User-Agent' header for the request.
-	c.setUserAgent(req)
-
-	// Set all headers.
-	for k, v := range metadata.customHeader {
-		req.Header.Set(k, v[0])
-	}
-
-	// Go net/http notoriously closes the request body.
-	// - The request Body, if non-nil, will be closed by the underlying Transport, even on errors.
-	// This can cause underlying *os.File seekers to fail; avoid that
-	// by making sure to wrap the closer as a nop.
-	if metadata.contentLength == 0 {
-		req.Body = nil
-	} else {
-		req.Body = ioutil.NopCloser(metadata.contentBody)
-	}
-
-	// Set incoming content-length.
-	req.ContentLength = metadata.contentLength
-	if req.ContentLength <= -1 {
-		// For unknown content length, we upload using transfer-encoding: chunked.
-		req.TransferEncoding = []string{"chunked"}
-	}
-
-	// set md5Sum for content protection.
-	if len(metadata.contentMD5Base64) > 0 {
-		req.Header.Set("Content-Md5", metadata.contentMD5Base64)
-	}
-
-	// For anonymous requests just return.
-	if signerType.IsAnonymous() {
-		return req, nil
-	}
-
-	switch {
-	case signerType.IsV2():
-		// Add signature version '2' authorization header.
-		req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
-	case metadata.objectName != "" && method == "PUT" && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure:
-		// Streaming signature is used by default for a PUT object request. Additionally we
-		// also check whether the initialized client is secure; if so, we don't need to
-		// perform streaming signature.
-		req = s3signer.StreamingSignV4(req, accessKeyID,
-			secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC())
-	default:
-		// Set sha256 sum for signature calculation only with signature version '4'.
-		shaHeader := unsignedPayload
-		if metadata.contentSHA256Hex != "" {
-			shaHeader = metadata.contentSHA256Hex
-		}
-		req.Header.Set("X-Amz-Content-Sha256", shaHeader)
-
-		// Add signature version '4' authorization header.
-		req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location)
-	}
-
-	// Return request.
-	return req, nil
-}
-
-// setUserAgent sets the 'User-Agent' header on the request.
-func (c Client) setUserAgent(req *http.Request) {
-	req.Header.Set("User-Agent", libraryUserAgent)
-	if c.appInfo.appName != "" && c.appInfo.appVersion != "" {
-		req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion)
-	}
-}
-
-// makeTargetURL makes a new target url.
-func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) {
-	host := c.endpointURL.Host
-	// For Amazon S3 endpoint, try to fetch location based endpoint.
-	if s3utils.IsAmazonEndpoint(c.endpointURL) {
-		if c.s3AccelerateEndpoint != "" && bucketName != "" {
-			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
-			// Disable transfer acceleration for non-compliant bucket names.
-			if strings.Contains(bucketName, ".") {
-				return nil, ErrTransferAccelerationBucket(bucketName)
-			}
-			// If transfer acceleration is requested set new host.
-			// For more details about enabling transfer acceleration read here.
-			// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
-			host = c.s3AccelerateEndpoint
-		} else {
-			// Do not change the host if the endpoint URL is a FIPS S3 endpoint.
-			if !s3utils.IsAmazonFIPSGovCloudEndpoint(c.endpointURL) {
-				// Fetch new host based on the bucket location.
-				host = getS3Endpoint(bucketLocation)
-			}
-		}
-	}
-
-	// Save scheme.
-	scheme := c.endpointURL.Scheme
-
-	// Strip port 80 and 443 so we won't send these ports in the Host header.
-	// The reason is that browsers and curl automatically remove :80 and :443
-	// from the generated presigned urls, which then causes a signature mismatch error.
-	if h, p, err := net.SplitHostPort(host); err == nil {
-		if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
-			host = h
-		}
-	}
-
-	urlStr := scheme + "://" + host + "/"
-	// Make URL only if bucketName is available, otherwise use the
-	// endpoint URL.
-	if bucketName != "" {
-		// Check whether the target url will have buckets which support virtual host style.
-		isVirtualHostStyle := s3utils.IsVirtualHostSupported(c.endpointURL, bucketName)
-
-		// If endpoint supports virtual host style use that always.
-		// Currently only S3 and Google Cloud Storage would support
-		// virtual host style.
-		if isVirtualHostStyle {
-			urlStr = scheme + "://" + bucketName + "." + host + "/"
-			if objectName != "" {
-				urlStr = urlStr + s3utils.EncodePath(objectName)
-			}
-		} else {
-			// If not fall back to using path style.
-			urlStr = urlStr + bucketName + "/"
-			if objectName != "" {
-				urlStr = urlStr + s3utils.EncodePath(objectName)
-			}
-		}
-	}
-
-	// If there are any query values, add them to the end.
-	if len(queryValues) > 0 {
-		urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
-	}
-
-	u, err := url.Parse(urlStr)
-	if err != nil {
-		return nil, err
-	}
-
-	return u, nil
-}
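
api.go above carries the client constructors plus the request, signing and retry plumbing. A short construction fragment using only functions defined in that file (endpoint, credentials, region and app details are placeholders):

// NewWithRegion skips bucket-location lookups, per its doc comment above.
client, err := minio.NewWithRegion("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true, "eu-west-1")
if err != nil {
	log.Fatal(err)
}
// Appended to the User-Agent built from libraryUserAgentPrefix.
client.SetAppInfo("myapp", "1.0.0")
// Dump redacted request/response headers for every call to stderr.
client.TraceOn(os.Stderr)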

+ 0 - 219
vendor/github.com/minio/minio-go/bucket-cache.go

@@ -1,219 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"net/http"
-	"net/url"
-	"path"
-	"sync"
-
-	"github.com/minio/minio-go/pkg/credentials"
-	"github.com/minio/minio-go/pkg/s3signer"
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// bucketLocationCache - Provides a simple mechanism to hold bucket
-// locations in memory.
-type bucketLocationCache struct {
-	// mutex is used for handling the concurrent
-	// read/write requests for cache.
-	sync.RWMutex
-
-	// items holds the cached bucket locations.
-	items map[string]string
-}
-
-// newBucketLocationCache - Provides a new bucket location cache to be
-// used internally with the client object.
-func newBucketLocationCache() *bucketLocationCache {
-	return &bucketLocationCache{
-		items: make(map[string]string),
-	}
-}
-
-// Get - Returns a value of a given key if it exists.
-func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) {
-	r.RLock()
-	defer r.RUnlock()
-	location, ok = r.items[bucketName]
-	return
-}
-
-// Set - Will persist a value into cache.
-func (r *bucketLocationCache) Set(bucketName string, location string) {
-	r.Lock()
-	defer r.Unlock()
-	r.items[bucketName] = location
-}
-
-// Delete - Deletes a bucket name from cache.
-func (r *bucketLocationCache) Delete(bucketName string) {
-	r.Lock()
-	defer r.Unlock()
-	delete(r.items, bucketName)
-}
-
-// GetBucketLocation - get the location for the bucket name from the location cache;
-// if not cached, fetch it freshly by making a new request.
-func (c Client) GetBucketLocation(bucketName string) (string, error) {
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return "", err
-	}
-	return c.getBucketLocation(bucketName)
-}
-
-// getBucketLocation - Get the location for the bucketName from the location map cache;
-// if not cached, fetch it freshly by making a new request.
-func (c Client) getBucketLocation(bucketName string) (string, error) {
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return "", err
-	}
-
-	// Region set then no need to fetch bucket location.
-	if c.region != "" {
-		return c.region, nil
-	}
-
-	if location, ok := c.bucketLocCache.Get(bucketName); ok {
-		return location, nil
-	}
-
-	// Initialize a new request.
-	req, err := c.getBucketLocationRequest(bucketName)
-	if err != nil {
-		return "", err
-	}
-
-	// Initiate the request.
-	resp, err := c.do(req)
-	defer closeResponse(resp)
-	if err != nil {
-		return "", err
-	}
-	location, err := processBucketLocationResponse(resp, bucketName)
-	if err != nil {
-		return "", err
-	}
-	c.bucketLocCache.Set(bucketName, location)
-	return location, nil
-}
-
-// processes the getBucketLocation http response from the server.
-func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) {
-	if resp != nil {
-		if resp.StatusCode != http.StatusOK {
-			err = httpRespToErrorResponse(resp, bucketName, "")
-			errResp := ToErrorResponse(err)
-			// For access denied error, it could be an anonymous
-			// request. Move forward and let the top level callers
-			// succeed if possible based on their policy.
-			if errResp.Code == "AccessDenied" {
-				return "us-east-1", nil
-			}
-			return "", err
-		}
-	}
-
-	// Extract location.
-	var locationConstraint string
-	err = xmlDecoder(resp.Body, &locationConstraint)
-	if err != nil {
-		return "", err
-	}
-
-	location := locationConstraint
-	// An empty location defaults to 'us-east-1'.
-	if location == "" {
-		location = "us-east-1"
-	}
-
-	// Location can be 'EU' convert it to meaningful 'eu-west-1'.
-	if location == "EU" {
-		location = "eu-west-1"
-	}
-
-	// Note: the caller saves this location into its cache.
-
-	// Return.
-	return location, nil
-}
-
-// getBucketLocationRequest - wrapper that creates a new getBucketLocation request.
-func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) {
-	// Set location query.
-	urlValues := make(url.Values)
-	urlValues.Set("location", "")
-
-	// Set get bucket location always as path style.
-	targetURL := c.endpointURL
-	targetURL.Path = path.Join(bucketName, "") + "/"
-	targetURL.RawQuery = urlValues.Encode()
-
-	// Get a new HTTP request for the method.
-	req, err := http.NewRequest("GET", targetURL.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-
-	// Set UserAgent for the request.
-	c.setUserAgent(req)
-
-	// Get credentials from the configured credentials provider.
-	value, err := c.credsProvider.Get()
-	if err != nil {
-		return nil, err
-	}
-
-	var (
-		signerType      = value.SignerType
-		accessKeyID     = value.AccessKeyID
-		secretAccessKey = value.SecretAccessKey
-		sessionToken    = value.SessionToken
-	)
-
-	// Custom signer set then override the behavior.
-	if c.overrideSignerType != credentials.SignatureDefault {
-		signerType = c.overrideSignerType
-	}
-
-	// If signerType returned by credentials helper is anonymous,
-	// then do not sign regardless of signerType override.
-	if value.SignerType == credentials.SignatureAnonymous {
-		signerType = credentials.SignatureAnonymous
-	}
-
-	if signerType.IsAnonymous() {
-		return req, nil
-	}
-
-	if signerType.IsV2() {
-		req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
-		return req, nil
-	}
-
-	// Set sha256 sum for signature calculation only with signature version '4'.
-	contentSha256 := emptySHA256Hex
-	if c.secure {
-		contentSha256 = unsignedPayload
-	}
-
-	req.Header.Set("X-Amz-Content-Sha256", contentSha256)
-	req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
-	return req, nil
-}
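
bucket-cache.go above resolves and memoizes bucket regions. A one-line fragment for the exported entry point: the first call issues the GET "?location" request built by getBucketLocationRequest; later calls are served from bucketLocCache:

loc, err := client.GetBucketLocation("mybucket")
if err != nil {
	log.Fatal(err)
}
log.Printf("bucket lives in %s", loc) // e.g. "us-east-1" when the constraint is empty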

+ 0 - 232
vendor/github.com/minio/minio-go/bucket-notification.go

@@ -1,232 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"encoding/xml"
-	"reflect"
-)
-
-// NotificationEventType is an S3 notification event type associated with the bucket notification configuration
-type NotificationEventType string
-
-// The role of all event types is described in:
-// 	http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
-const (
-	ObjectCreatedAll                     NotificationEventType = "s3:ObjectCreated:*"
-	ObjectCreatedPut                                           = "s3:ObjectCreated:Put"
-	ObjectCreatedPost                                          = "s3:ObjectCreated:Post"
-	ObjectCreatedCopy                                          = "s3:ObjectCreated:Copy"
-	ObjectCreatedCompleteMultipartUpload                       = "s3:ObjectCreated:CompleteMultipartUpload"
-	ObjectAccessedGet                                          = "s3:ObjectAccessed:Get"
-	ObjectAccessedHead                                         = "s3:ObjectAccessed:Head"
-	ObjectAccessedAll                                          = "s3:ObjectAccessed:*"
-	ObjectRemovedAll                                           = "s3:ObjectRemoved:*"
-	ObjectRemovedDelete                                        = "s3:ObjectRemoved:Delete"
-	ObjectRemovedDeleteMarkerCreated                           = "s3:ObjectRemoved:DeleteMarkerCreated"
-	ObjectReducedRedundancyLostObject                          = "s3:ReducedRedundancyLostObject"
-)
-
-// FilterRule - child of S3Key, a tag in the notification xml which
-// carries suffix/prefix filters
-type FilterRule struct {
-	Name  string `xml:"Name"`
-	Value string `xml:"Value"`
-}
-
-// S3Key - child of Filter, a tag in the notification xml which
-// carries suffix/prefix filters
-type S3Key struct {
-	FilterRules []FilterRule `xml:"FilterRule,omitempty"`
-}
-
-// Filter - a tag in the notification xml structure which carries
-// suffix/prefix filters
-type Filter struct {
-	S3Key S3Key `xml:"S3Key,omitempty"`
-}
-
-// Arn - holds ARN information that will be sent to the web service;
-// an ARN description can be found at http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
-type Arn struct {
-	Partition string
-	Service   string
-	Region    string
-	AccountID string
-	Resource  string
-}
-
-// NewArn creates a new ARN based on the given partition, service, region, account id and resource
-func NewArn(partition, service, region, accountID, resource string) Arn {
-	return Arn{Partition: partition,
-		Service:   service,
-		Region:    region,
-		AccountID: accountID,
-		Resource:  resource}
-}
-
-// String returns the string format of the ARN
-func (arn Arn) String() string {
-	return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
-}
-
-// NotificationConfig - represents one single notification configuration
-// such as topic, queue or lambda configuration.
-type NotificationConfig struct {
-	ID     string                  `xml:"Id,omitempty"`
-	Arn    Arn                     `xml:"-"`
-	Events []NotificationEventType `xml:"Event"`
-	Filter *Filter                 `xml:"Filter,omitempty"`
-}
-
-// NewNotificationConfig creates one notification config and sets the given ARN
-func NewNotificationConfig(arn Arn) NotificationConfig {
-	return NotificationConfig{Arn: arn}
-}
-
-// AddEvents adds one or more events to the current notification config
-func (t *NotificationConfig) AddEvents(events ...NotificationEventType) {
-	t.Events = append(t.Events, events...)
-}
-
-// AddFilterSuffix sets the suffix configuration on the current notification config
-func (t *NotificationConfig) AddFilterSuffix(suffix string) {
-	if t.Filter == nil {
-		t.Filter = &Filter{}
-	}
-	newFilterRule := FilterRule{Name: "suffix", Value: suffix}
-	// Replace an existing suffix rule if present, otherwise append a new one
-	for index := range t.Filter.S3Key.FilterRules {
-		if t.Filter.S3Key.FilterRules[index].Name == "suffix" {
-			t.Filter.S3Key.FilterRules[index] = newFilterRule
-			return
-		}
-	}
-	t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
-}
-
-// AddFilterPrefix sets the prefix configuration on the current notification config
-func (t *NotificationConfig) AddFilterPrefix(prefix string) {
-	if t.Filter == nil {
-		t.Filter = &Filter{}
-	}
-	newFilterRule := FilterRule{Name: "prefix", Value: prefix}
-	// Replace an existing prefix rule if present, otherwise append a new one
-	for index := range t.Filter.S3Key.FilterRules {
-		if t.Filter.S3Key.FilterRules[index].Name == "prefix" {
-			t.Filter.S3Key.FilterRules[index] = newFilterRule
-			return
-		}
-	}
-	t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
-}
-
-// TopicConfig carries one single topic notification configuration
-type TopicConfig struct {
-	NotificationConfig
-	Topic string `xml:"Topic"`
-}
-
-// QueueConfig carries one single queue notification configuration
-type QueueConfig struct {
-	NotificationConfig
-	Queue string `xml:"Queue"`
-}
-
-// LambdaConfig carries one single cloudfunction notification configuration
-type LambdaConfig struct {
-	NotificationConfig
-	Lambda string `xml:"CloudFunction"`
-}
-
-// BucketNotification - the struct that represents the whole XML to be sent to the web service
-type BucketNotification struct {
-	XMLName       xml.Name       `xml:"NotificationConfiguration"`
-	LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"`
-	TopicConfigs  []TopicConfig  `xml:"TopicConfiguration"`
-	QueueConfigs  []QueueConfig  `xml:"QueueConfiguration"`
-}
-
-// AddTopic adds a given topic config to the general bucket notification config
-func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) {
-	newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()}
-	for _, n := range b.TopicConfigs {
-		if reflect.DeepEqual(n, newTopicConfig) {
-			// Avoid adding duplicated entry
-			return
-		}
-	}
-	b.TopicConfigs = append(b.TopicConfigs, newTopicConfig)
-}
-
-// AddQueue adds a given queue config to the general bucket notification config
-func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) {
-	newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()}
-	for _, n := range b.QueueConfigs {
-		if reflect.DeepEqual(n, newQueueConfig) {
-			// Avoid adding duplicated entry
-			return
-		}
-	}
-	b.QueueConfigs = append(b.QueueConfigs, newQueueConfig)
-}
-
-// AddLambda adds a given lambda config to the general bucket notification config
-func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) {
-	newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
-	for _, n := range b.LambdaConfigs {
-		if reflect.DeepEqual(n, newLambdaConfig) {
-			// Avoid adding duplicated entry
-			return
-		}
-	}
-	b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig)
-}
-
-// RemoveTopicByArn removes all topic configurations that match the exact specified ARN
-func (b *BucketNotification) RemoveTopicByArn(arn Arn) {
-	var topics []TopicConfig
-	for _, topic := range b.TopicConfigs {
-		if topic.Topic != arn.String() {
-			topics = append(topics, topic)
-		}
-	}
-	b.TopicConfigs = topics
-}
-
-// RemoveQueueByArn removes all queue configurations that match the exact specified ARN
-func (b *BucketNotification) RemoveQueueByArn(arn Arn) {
-	var queues []QueueConfig
-	for _, queue := range b.QueueConfigs {
-		if queue.Queue != arn.String() {
-			queues = append(queues, queue)
-		}
-	}
-	b.QueueConfigs = queues
-}
-
-// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN
-func (b *BucketNotification) RemoveLambdaByArn(arn Arn) {
-	var lambdas []LambdaConfig
-	for _, lambda := range b.LambdaConfigs {
-		if lambda.Lambda != arn.String() {
-			lambdas = append(lambdas, lambda)
-		}
-	}
-	b.LambdaConfigs = lambdas
-}

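A minimal sketch of how the notification types deleted above fit together: an Arn names the target service, a NotificationConfig carries the events and filters, and BucketNotification is the XML envelope ultimately sent to the server. The topic ARN, account ID and key suffix below are hypothetical placeholders.

package main

import (
	"fmt"

	"github.com/minio/minio-go"
)

func main() {
	// Hypothetical SNS topic that should receive the events.
	topicArn := minio.NewArn("aws", "sns", "us-east-1", "123456789012", "my-topic")

	// One notification config: fire on any object creation, limited to *.jpg keys.
	config := minio.NewNotificationConfig(topicArn)
	config.AddEvents(minio.ObjectCreatedAll)
	config.AddFilterSuffix(".jpg")

	// Wrap it in the bucket-level envelope; AddTopic skips duplicate entries.
	var bn minio.BucketNotification
	bn.AddTopic(config)

	fmt.Println(topicArn.String()) // arn:aws:sns:us-east-1:123456789012:my-topic
}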
+ 0 - 70
vendor/github.com/minio/minio-go/constants.go

@@ -1,70 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-/// Multipart upload defaults.
-
-// absMinPartSize - absolute minimum part size (5 MiB) below which
-// a part in a multipart upload may not be uploaded.
-const absMinPartSize = 1024 * 1024 * 5
-
-// minPartSize - minimum part size (64 MiB) per object, above which
-// putObject internally switches to multipart upload.
-const minPartSize = 1024 * 1024 * 64
-
-// copyPartSize - default (and maximum) part size to copy in a
-// copy-object request (5GiB)
-const copyPartSize = 1024 * 1024 * 1024 * 5
-
-// maxPartsCount - maximum number of parts for a single multipart session.
-const maxPartsCount = 10000
-
-// maxPartSize - maximum part size 5GiB for a single multipart upload
-// operation.
-const maxPartSize = 1024 * 1024 * 1024 * 5
-
-// maxSinglePutObjectSize - maximum size 5GiB of object per PUT
-// operation.
-const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
-
-// maxMultipartPutObjectSize - maximum size 5TiB of object for
-// Multipart operation.
-const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
-
-// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
-// we don't want to sign the request payload
-const unsignedPayload = "UNSIGNED-PAYLOAD"
-
-// Total number of parallel workers used for multipart operation.
-const totalWorkers = 4
-
-// Signature related constants.
-const (
-	signV4Algorithm   = "AWS4-HMAC-SHA256"
-	iso8601DateFormat = "20060102T150405Z"
-)
-
-// Encryption headers stored along with the object.
-const (
-	amzHeaderIV      = "X-Amz-Meta-X-Amz-Iv"
-	amzHeaderKey     = "X-Amz-Meta-X-Amz-Key"
-	amzHeaderMatDesc = "X-Amz-Meta-X-Amz-Matdesc"
-)
-
-// Storage class header constant.
-const amzStorageClass = "X-Amz-Storage-Class"

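These limits constrain each other: at minPartSize (64 MiB) a 5 TiB object would need 81920 parts, far over maxPartsCount, so any uploader has to grow the part size with the object. A back-of-the-envelope sketch of that relationship, not the library's actual sizing routine:

package main

import "fmt"

// Copies of the removed constants, for a self-contained illustration.
const (
	minPartSize               = 1024 * 1024 * 64
	maxPartsCount             = 10000
	maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
)

// partSizeFor doubles the part size until objectSize fits in maxPartsCount parts.
func partSizeFor(objectSize int64) int64 {
	partSize := int64(minPartSize)
	for objectSize/partSize >= maxPartsCount {
		partSize *= 2
	}
	return partSize
}

func main() {
	// The 5 TiB ceiling needs 1 GiB parts: prints 1073741824 (5120 parts).
	fmt.Println(partSizeFor(maxMultipartPutObjectSize))
}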
+ 0 - 154
vendor/github.com/minio/minio-go/core.go

@@ -1,154 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"context"
-	"io"
-	"strings"
-
-	"github.com/minio/minio-go/pkg/policy"
-)
-
-// Core - Inherits Client and adds new methods to expose the low level S3 APIs.
-type Core struct {
-	*Client
-}
-
-// NewCore - Returns a new initialized Core client. This client should only be
-// used under special conditions, such as needing access to lower-level
-// primitives in order to write your own wrappers.
-func NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Core, error) {
-	var s3Client Core
-	client, err := NewV4(endpoint, accessKeyID, secretAccessKey, secure)
-	if err != nil {
-		return nil, err
-	}
-	s3Client.Client = client
-	return &s3Client, nil
-}
-
-// ListObjects - Lists all the objects at a prefix; using marker and delimiter
-// you can further filter the results.
-func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) {
-	return c.listObjectsQuery(bucket, prefix, marker, delimiter, maxKeys)
-}
-
-// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses
-// continuationToken instead of marker to further filter the results.
-func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
-	return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys)
-}
-
-// CopyObject - copies an object from source object to destination object on server side.
-func (c Core) CopyObject(sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string) (ObjectInfo, error) {
-	return c.copyObjectDo(context.Background(), sourceBucket, sourceObject, destBucket, destObject, metadata)
-}
-
-// CopyObjectPart - creates a part in a multipart upload by copying (a
-// part of) an existing object.
-func (c Core) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string,
-	partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) {
-
-	return c.copyObjectPartDo(context.Background(), srcBucket, srcObject, destBucket, destObject, uploadID,
-		partID, startOffset, length, metadata)
-}
-
-// PutObject - Upload object. Uploads using single PUT call.
-func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectInfo, error) {
-	opts := PutObjectOptions{}
-	m := make(map[string]string)
-	for k, v := range metadata {
-		if strings.ToLower(k) == "content-encoding" {
-			opts.ContentEncoding = v
-		} else if strings.ToLower(k) == "content-disposition" {
-			opts.ContentDisposition = v
-		} else if strings.ToLower(k) == "content-type" {
-			opts.ContentType = v
-		} else if strings.ToLower(k) == "cache-control" {
-			opts.CacheControl = v
-		} else {
-			m[k] = v
-		}
-	}
-	opts.UserMetadata = m
-	return c.putObjectDo(context.Background(), bucket, object, data, md5Base64, sha256Hex, size, opts)
-}
-
-// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID.
-func (c Core) NewMultipartUpload(bucket, object string, opts PutObjectOptions) (uploadID string, err error) {
-	result, err := c.initiateMultipartUpload(context.Background(), bucket, object, opts)
-	return result.UploadID, err
-}
-
-// ListMultipartUploads - List incomplete uploads.
-func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) {
-	return c.listMultipartUploadsQuery(bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
-}
-
-// PutObjectPart - Upload an object part.
-func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string) (ObjectPart, error) {
-	return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, data, size, md5Base64, sha256Hex, nil)
-}
-
-// PutObjectPartWithMetadata - upload an object part with additional request metadata.
-func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int, data io.Reader,
-	size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectPart, error) {
-	return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, metadata)
-}
-
-// ListObjectParts - List uploaded parts of an incomplete upload.
-func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) {
-	return c.listObjectPartsQuery(bucket, object, uploadID, partNumberMarker, maxParts)
-}
-
-// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
-func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error {
-	_, err := c.completeMultipartUpload(context.Background(), bucket, object, uploadID, completeMultipartUpload{
-		Parts: parts,
-	})
-	return err
-}
-
-// AbortMultipartUpload - Abort an incomplete upload.
-func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error {
-	return c.abortMultipartUpload(context.Background(), bucket, object, uploadID)
-}
-
-// GetBucketPolicy - fetches bucket access policy for a given bucket.
-func (c Core) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) {
-	return c.getBucketPolicy(bucket)
-}
-
-// PutBucketPolicy - applies a new bucket access policy for a given bucket.
-func (c Core) PutBucketPolicy(bucket string, bucketPolicy policy.BucketAccessPolicy) error {
-	return c.putBucketPolicy(bucket, bucketPolicy)
-}
-
-// GetObject is a lower-level API implemented to support reading
-// partial objects and downloading objects with special conditions
-// matching ETag, modification time, etc.
-func (c Core) GetObject(bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) {
-	return c.getObject(context.Background(), bucketName, objectName, opts)
-}
-
-// StatObject is a lower-level API implemented to support special
-// conditions matching ETag or modification time on a request.
-func (c Core) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
-	return c.statObject(context.Background(), bucketName, objectName, opts)
-}

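A hedged sketch of a manual multipart upload driven through the Core wrappers deleted above; the endpoint, credentials, bucket and object names are placeholders, and CompletePart's PartNumber/ETag fields are assumed from the removed api-s3-datatypes.go.

package main

import (
	"bytes"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	core, err := minio.NewCore("play.minio.io:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// 1. Initiate the upload and remember the upload ID.
	uploadID, err := core.NewMultipartUpload("my-bucketname", "my-objectname", minio.PutObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}

	// 2. Upload a part; a real upload would loop over chunks of at least
	// absMinPartSize, where only the final part may be smaller.
	data := []byte("hello multipart")
	part, err := core.PutObjectPart("my-bucketname", "my-objectname", uploadID, 1,
		bytes.NewReader(data), int64(len(data)), "", "")
	if err != nil {
		log.Fatalln(err)
	}

	// 3. Commit the parts in ascending part order.
	if err := core.CompleteMultipartUpload("my-bucketname", "my-objectname", uploadID,
		[]minio.CompletePart{{PartNumber: 1, ETag: part.ETag}}); err != nil {
		log.Fatalln(err)
	}
}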
+ 0 - 227
vendor/github.com/minio/minio-go/docs/validator.go

@@ -1,227 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"fmt"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"strings"
-	"text/template"
-
-	"github.com/a8m/mark"
-	"github.com/gernest/wow"
-	"github.com/gernest/wow/spin"
-	"github.com/minio/cli"
-)
-
-func init() {
-	// Validate go binary.
-	if _, err := exec.LookPath("go"); err != nil {
-		panic(err)
-	}
-}
-
-var globalFlags = []cli.Flag{
-	cli.StringFlag{
-		Name:  "m",
-		Value: "API.md",
-		Usage: "Path to markdown api documentation.",
-	},
-	cli.StringFlag{
-		Name:  "t",
-		Value: "checker.go.template",
-		Usage: "Template used for generating the programs.",
-	},
-	cli.IntFlag{
-		Name:  "skip",
-		Value: 2,
-		Usage: "Skip entries before validating the code.",
-	},
-}
-
-func runGofmt(path string) (msg string, err error) {
-	cmdArgs := []string{"-s", "-w", "-l", path}
-	cmd := exec.Command("gofmt", cmdArgs...)
-	stdoutStderr, err := cmd.CombinedOutput()
-	if err != nil {
-		return "", err
-	}
-	return string(stdoutStderr), nil
-}
-
-func runGoImports(path string) (msg string, err error) {
-	cmdArgs := []string{"-w", path}
-	cmd := exec.Command("goimports", cmdArgs...)
-	stdoutStderr, err := cmd.CombinedOutput()
-	if err != nil {
-		return string(stdoutStderr), err
-	}
-	return string(stdoutStderr), nil
-}
-
-func runGoBuild(path string) (msg string, err error) {
-	// Go build the path.
-	cmdArgs := []string{"build", "-o", "/dev/null", path}
-	cmd := exec.Command("go", cmdArgs...)
-	stdoutStderr, err := cmd.CombinedOutput()
-	if err != nil {
-		return string(stdoutStderr), err
-	}
-	return string(stdoutStderr), nil
-}
-
-func validatorAction(ctx *cli.Context) error {
-	if !ctx.IsSet("m") || !ctx.IsSet("t") {
-		return nil
-	}
-	docPath := ctx.String("m")
-	var err error
-	docPath, err = filepath.Abs(docPath)
-	if err != nil {
-		return err
-	}
-	data, err := ioutil.ReadFile(docPath)
-	if err != nil {
-		return err
-	}
-
-	templatePath := ctx.String("t")
-	templatePath, err = filepath.Abs(templatePath)
-	if err != nil {
-		return err
-	}
-
-	skipEntries := ctx.Int("skip")
-	m := mark.New(string(data), &mark.Options{
-		Gfm: true, // Github markdown support is enabled by default.
-	})
-
-	t, err := template.ParseFiles(templatePath)
-	if err != nil {
-		return err
-	}
-
-	tmpDir, err := ioutil.TempDir("", "md-verifier")
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(tmpDir)
-
-	entryN := 1
-	for i := mark.NodeText; i < mark.NodeCheckbox; i++ {
-		if mark.NodeCode != mark.NodeType(i) {
-			m.AddRenderFn(mark.NodeType(i), func(node mark.Node) (s string) {
-				return ""
-			})
-			continue
-		}
-		m.AddRenderFn(mark.NodeCode, func(node mark.Node) (s string) {
-			p, ok := node.(*mark.CodeNode)
-			if !ok {
-				return
-			}
-			p.Text = strings.NewReplacer("&lt;", "<", "&gt;", ">", "&quot;", `"`, "&amp;", "&").Replace(p.Text)
-			if skipEntries > 0 {
-				skipEntries--
-				return
-			}
-
-			testFilePath := filepath.Join(tmpDir, "example.go")
-			w, werr := os.Create(testFilePath)
-			if werr != nil {
-				panic(werr)
-			}
-			t.Execute(w, p)
-			w.Sync()
-			w.Close()
-			entryN++
-
-			msg, err := runGofmt(testFilePath)
-			if err != nil {
-				fmt.Printf("Failed running gofmt on %s, with (%s):(%s)\n", testFilePath, msg, err)
-				os.Exit(-1)
-			}
-
-			msg, err = runGoImports(testFilePath)
-			if err != nil {
-				fmt.Printf("Failed running goimports on %s, with (%s):(%s)\n", testFilePath, msg, err)
-				os.Exit(-1)
-			}
-
-			msg, err = runGoBuild(testFilePath)
-			if err != nil {
-				fmt.Printf("Failed running go build on %s, with (%s):(%s)\n", testFilePath, msg, err)
-				fmt.Printf("Code with possible issue in %s:\n%s", docPath, p.Text)
-				fmt.Printf("To test `go build %s`\n", testFilePath)
-				os.Exit(-1)
-			}
-
-			// Once successfully built remove the test file
-			os.Remove(testFilePath)
-			return
-		})
-	}
-
-	w := wow.New(os.Stdout, spin.Get(spin.Moon), fmt.Sprintf(" Running validation tests in %s", tmpDir))
-
-	w.Start()
-	// Render markdown executes our checker on each code blocks.
-	_ = m.Render()
-	w.PersistWith(spin.Get(spin.Runner), " Successfully finished tests")
-	w.Stop()
-
-	return nil
-}
-
-func main() {
-	app := cli.NewApp()
-	app.Action = validatorAction
-	app.HideVersion = true
-	app.HideHelpCommand = true
-	app.Usage = "Validates code block sections inside API.md"
-	app.Author = "Minio.io"
-	app.Flags = globalFlags
-	// Help template for validator
-	app.CustomAppHelpTemplate = `NAME:
-  {{.Name}} - {{.Usage}}
-
-USAGE:
-  {{.Name}} {{if .VisibleFlags}}[FLAGS] {{end}}COMMAND{{if .VisibleFlags}} [COMMAND FLAGS | -h]{{end}} [ARGUMENTS...]
-
-COMMANDS:
-  {{range .VisibleCommands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
-  {{end}}{{if .VisibleFlags}}
-FLAGS:
-  {{range .VisibleFlags}}{{.}}
-  {{end}}{{end}}
-TEMPLATE:
-  Validator uses Go's 'text/template' formatting so you need to ensure 
-  your template is formatted correctly, check 'docs/checker.go.template'
-
-USAGE:
-  go run docs/validator.go -m docs/API.md -t /tmp/mycode.go.template
-
-`
-	app.Run(os.Args)
-
-}

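The validator deleted above hooks custom render functions into the markdown parser to extract code blocks and go-build each one. A stdlib-only sketch of the same idea, with a regexp standing in for the renderer hooks and API.md assumed to sit in the working directory:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
)

func main() {
	data, err := ioutil.ReadFile("API.md")
	if err != nil {
		panic(err)
	}

	// Grab the body of every fenced ```go ... ``` block.
	blocks := regexp.MustCompile("(?s)```go\n(.*?)```").FindAllSubmatch(data, -1)

	tmpDir, err := ioutil.TempDir("", "md-verifier")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmpDir)

	for i, b := range blocks {
		path := filepath.Join(tmpDir, "example.go")
		if err := ioutil.WriteFile(path, b[1], 0644); err != nil {
			panic(err)
		}
		// Build each extracted snippet; discard the binary.
		if out, err := exec.Command("go", "build", "-o", os.DevNull, path).CombinedOutput(); err != nil {
			fmt.Printf("code block %d does not build: %v\n%s", i, err, out)
			os.Exit(1)
		}
	}
	fmt.Println("all code blocks build")
}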
+ 0 - 61
vendor/github.com/minio/minio-go/examples/minio/listenbucketnotification.go

@@ -1,61 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESS, YOUR-SECRET and YOUR-BUCKET are
-	// dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	minioClient, err := minio.New("play.minio.io:9000", "YOUR-ACCESS", "YOUR-SECRET", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// minioClient.TraceOn(os.Stderr)
-
-	// Create a done channel to control 'ListenBucketNotification' go routine.
-	doneCh := make(chan struct{})
-
-	// Indicate to our routine to exit cleanly upon return.
-	defer close(doneCh)
-
-	// Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events.
-	for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", "PREFIX", "SUFFIX", []string{
-		"s3:ObjectCreated:*",
-		"s3:ObjectAccessed:*",
-		"s3:ObjectRemoved:*",
-	}, doneCh) {
-		if notificationInfo.Err != nil {
-			log.Fatalln(notificationInfo.Err)
-		}
-		log.Println(notificationInfo)
-	}
-}

+ 0 - 52
vendor/github.com/minio/minio-go/examples/s3/bucketexists.go

@@ -1,52 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
-	// dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	found, err := s3Client.BucketExists("my-bucketname")
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	if found {
-		log.Println("Bucket found.")
-	} else {
-		log.Println("Bucket not found.")
-	}
-}

+ 0 - 78
vendor/github.com/minio/minio-go/examples/s3/composeobject.go

@@ -1,78 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	minio "github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
-	// my-objectname are dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// Enable trace.
-	// s3Client.TraceOn(os.Stderr)
-
-	// Prepare the source decryption key (here we assume the same key
-	// decrypts all source objects).
-	decKey := minio.NewSSEInfo([]byte{1, 2, 3}, "")
-
-	// Source objects to concatenate. We also specify a decryption
-	// key for each.
-	src1 := minio.NewSourceInfo("bucket1", "object1", &decKey)
-	src1.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a")
-
-	src2 := minio.NewSourceInfo("bucket2", "object2", &decKey)
-	src2.SetMatchETagCond("f8ef9c385918b653a31624deb84149d2")
-
-	src3 := minio.NewSourceInfo("bucket3", "object3", &decKey)
-	src3.SetMatchETagCond("5918b653a31624deb84149d2f8ef9c38")
-
-	// Create slice of sources.
-	srcs := []minio.SourceInfo{src1, src2, src3}
-
-	// Prepare destination encryption key
-	encKey := minio.NewSSEInfo([]byte{8, 9, 0}, "")
-
-	// Create destination info
-	dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	err = s3Client.ComposeObject(dst, srcs)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	log.Println("Composed object successfully.")
-}

+ 0 - 75
vendor/github.com/minio/minio-go/examples/s3/copyobject.go

@@ -1,75 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-	"time"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
-	// my-objectname are dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// Enable trace.
-	// s3Client.TraceOn(os.Stderr)
-
-	// Source object
-	src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil)
-
-	// All following conditions are allowed and can be combined together.
-
-	// Set modified condition, copy object modified since 2014 April.
-	src.SetModifiedSinceCond(time.Date(2014, time.April, 1, 0, 0, 0, 0, time.UTC))
-
-	// Set unmodified condition, copy object unmodified since 2014 April.
-	// src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 1, 0, 0, 0, 0, time.UTC))
-
-	// Set matching ETag condition, copy object which matches the following ETag.
-	// src.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a")
-
-	// Set matching ETag except condition, copy object which does not match the following ETag.
-	// src.SetMatchETagExceptCond("31624deb84149d2f8ef9c385918b653a")
-
-	// Destination object
-	dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// Initiate copy object.
-	err = s3Client.CopyObject(dst, src)
-	if err != nil {
-		log.Fatalln(err)
-	}
-	log.Println("Copied source object /my-sourcebucketname/my-sourceobjectname to destination /my-bucketname/my-objectname successfully.")
-}

+ 0 - 54
vendor/github.com/minio/minio-go/examples/s3/fgetobject-context.go

@@ -1,54 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-	"time"
-
-	"context"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
-	// and my-filename.csv are dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
-	defer cancel()
-
-	if err := s3Client.FGetObjectWithContext(ctx, "my-bucketname", "my-objectname", "my-filename.csv", minio.GetObjectOptions{}); err != nil {
-		log.Fatalln(err)
-	}
-	log.Println("Successfully saved my-filename.csv")
-
-}

+ 0 - 46
vendor/github.com/minio/minio-go/examples/s3/fgetobject.go

@@ -1,46 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
-	// and my-filename.csv are dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	if err := s3Client.FGetObject("my-bucketname", "my-objectname", "my-filename.csv", minio.GetObjectOptions{}); err != nil {
-		log.Fatalln(err)
-	}
-	log.Println("Successfully saved my-filename.csv")
-}

+ 0 - 80
vendor/github.com/minio/minio-go/examples/s3/fputencrypted-object.go

@@ -1,80 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-	"github.com/minio/minio-go/pkg/encrypt"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
-	// my-objectname are dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// Specify a local file that we will upload
-	filePath := "my-testfile"
-
-	//// Build an asymmetric key from private and public files
-	//
-	// privateKey, err := ioutil.ReadFile("private.key")
-	// if err != nil {
-	//	t.Fatal(err)
-	// }
-	//
-	// publicKey, err := ioutil.ReadFile("public.key")
-	// if err != nil {
-	//	t.Fatal(err)
-	// }
-	//
-	// asymmetricKey, err := NewAsymmetricKey(privateKey, publicKey)
-	// if err != nil {
-	//	t.Fatal(err)
-	// }
-	////
-
-	// Build a symmetric key
-	symmetricKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
-
-	// Build encryption materials which will encrypt uploaded data
-	cbcMaterials, err := encrypt.NewCBCSecureMaterials(symmetricKey)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// Encrypt file content and upload to the server
-	n, err := s3Client.FPutEncryptedObject("my-bucketname", "my-objectname", filePath, cbcMaterials)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	log.Println("Uploaded my-objectname of size", n, "successfully.")
-}

+ 0 - 53
vendor/github.com/minio/minio-go/examples/s3/fputobject-context.go

@@ -1,53 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-	"time"
-
-	"context"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
-	// and my-filename.csv are dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
-	defer cancel()
-
-	if _, err := s3Client.FPutObjectWithContext(ctx, "my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{ContentType: "application/csv"}); err != nil {
-		log.Fatalln(err)
-	}
-	log.Println("Successfully uploaded my-filename.csv")
-}

+ 0 - 48
vendor/github.com/minio/minio-go/examples/s3/fputobject.go

@@ -1,48 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
-	// and my-filename.csv are dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{
-		ContentType: "application/csv",
-	}); err != nil {
-		log.Fatalln(err)
-	}
-	log.Println("Successfully uploaded my-filename.csv")
-}

+ 0 - 89
vendor/github.com/minio/minio-go/examples/s3/get-encrypted-object.go

@@ -1,89 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"io"
-	"log"
-	"os"
-
-	"github.com/minio/minio-go"
-	"github.com/minio/minio-go/pkg/encrypt"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and
-	// my-testfile are dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	//// Build an asymmetric key from private and public files
-	//
-	// privateKey, err := ioutil.ReadFile("private.key")
-	// if err != nil {
-	//	t.Fatal(err)
-	// }
-	//
-	// publicKey, err := ioutil.ReadFile("public.key")
-	// if err != nil {
-	//	t.Fatal(err)
-	// }
-	//
-	// asymmetricKey, err := NewAsymmetricKey(privateKey, publicKey)
-	// if err != nil {
-	//	t.Fatal(err)
-	// }
-	////
-
-	// Build a symmetric key
-	symmetricKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
-
-	// Build encryption materials which will encrypt uploaded data
-	cbcMaterials, err := encrypt.NewCBCSecureMaterials(symmetricKey)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// Get deciphered data from the server; deciphering is handled by cbcMaterials
-	reader, err := s3Client.GetEncryptedObject("my-bucketname", "my-objectname", cbcMaterials)
-	if err != nil {
-		log.Fatalln(err)
-	}
-	defer reader.Close()
-
-	// Local file which holds plain data
-	localFile, err := os.Create("my-testfile")
-	if err != nil {
-		log.Fatalln(err)
-	}
-	defer localFile.Close()
-
-	if _, err := io.Copy(localFile, reader); err != nil {
-		log.Fatalln(err)
-	}
-}

+ 0 - 56
vendor/github.com/minio/minio-go/examples/s3/getbucketnotification.go

@@ -1,56 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
-	// dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// s3Client.TraceOn(os.Stderr)
-
-	notifications, err := s3Client.GetBucketNotification("my-bucketname")
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	log.Println("Bucket notifications were successfully retrieved.")
-
-	for _, topicConfig := range notifications.TopicConfigs {
-		for _, e := range topicConfig.Events {
-			log.Println(e + " event is enabled.")
-		}
-	}
-}

+ 0 - 56
vendor/github.com/minio/minio-go/examples/s3/getbucketpolicy.go

@@ -1,56 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
-	// dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// s3Client.TraceOn(os.Stderr)
-
-	// Fetch the policy at 'my-objectprefix'.
-	policy, err := s3Client.GetBucketPolicy("my-bucketname", "my-objectprefix")
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// Description of policy output.
-	// "none" -  The specified bucket does not have a bucket policy.
-	// "readonly" - Read only operations are allowed.
-	// "writeonly" - Write only operations are allowed.
-	// "readwrite" - both read and write operations are allowed, the bucket is public.
-	log.Println("Success - ", policy)
-}

+ 0 - 73
vendor/github.com/minio/minio-go/examples/s3/getobject-context.go

@@ -1,73 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"io"
-	"log"
-	"os"
-	"time"
-
-	"context"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and
-	// my-testfile are dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
-	defer cancel()
-
-	opts := minio.GetObjectOptions{}
-	opts.SetModified(time.Now().Add(-10 * time.Minute)) // get the object only if it was modified within the last 10 minutes
-	reader, err := s3Client.GetObjectWithContext(ctx, "my-bucketname", "my-objectname", opts)
-	if err != nil {
-		log.Fatalln(err)
-	}
-	defer reader.Close()
-
-	localFile, err := os.Create("my-testfile")
-	if err != nil {
-		log.Fatalln(err)
-	}
-	defer localFile.Close()
-
-	stat, err := reader.Stat()
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	if _, err := io.CopyN(localFile, reader, stat.Size); err != nil {
-		log.Fatalln(err)
-	}
-}

+ 0 - 64
vendor/github.com/minio/minio-go/examples/s3/getobject.go

@@ -1,64 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"io"
-	"log"
-	"os"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and
-	// my-testfile are dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	reader, err := s3Client.GetObject("my-bucketname", "my-objectname", minio.GetObjectOptions{})
-	if err != nil {
-		log.Fatalln(err)
-	}
-	defer reader.Close()
-
-	localFile, err := os.Create("my-testfile")
-	if err != nil {
-		log.Fatalln(err)
-	}
-	defer localFile.Close()
-
-	stat, err := reader.Stat()
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	if _, err := io.CopyN(localFile, reader, stat.Size); err != nil {
-		log.Fatalln(err)
-	}
-}

+ 0 - 57
vendor/github.com/minio/minio-go/examples/s3/listbucketpolicies.go

@@ -1,57 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
-	// dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// s3Client.TraceOn(os.Stderr)
-
-	// Fetch the policies at 'my-objectprefix'.
-	policies, err := s3Client.ListBucketPolicies("my-bucketname", "my-objectprefix")
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// ListBucketPolicies returns a map of object policy rules to their associated permissions,
-	//    e.g.    mybucket/downloadfolder/* => readonly
-	//	      mybucket/shared/* => readwrite
-
-	for resource, permission := range policies {
-		log.Println(resource, " => ", permission)
-	}
-}

+ 0 - 49
vendor/github.com/minio/minio-go/examples/s3/listbuckets.go

@@ -1,49 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID and YOUR-SECRETACCESSKEY are
-	// dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	buckets, err := s3Client.ListBuckets()
-	if err != nil {
-		log.Fatalln(err)
-	}
-	for _, bucket := range buckets {
-		log.Println(bucket)
-	}
-}

+ 0 - 58
vendor/github.com/minio/minio-go/examples/s3/listincompleteuploads.go

@@ -1,58 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"fmt"
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
-	// are dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// Create a done channel to control 'ListObjects' go routine.
-	doneCh := make(chan struct{})
-
-	// Indicate to our routine to exit cleanly upon return.
-	defer close(doneCh)
-
-	// List all multipart uploads from a bucket-name with a matching prefix.
-	for multipartObject := range s3Client.ListIncompleteUploads("my-bucketname", "my-prefixname", true, doneCh) {
-		if multipartObject.Err != nil {
-			fmt.Println(multipartObject.Err)
-			return
-		}
-		fmt.Println(multipartObject)
-	}
-	return
-}

+ 0 - 77
vendor/github.com/minio/minio-go/examples/s3/listobjects-N.go

@@ -1,77 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"fmt"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
-	// are dummy values, please replace them with original values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		fmt.Println(err)
-		return
-	}
-
-	// List up to 'N' objects from a bucket with a matching prefix.
-	listObjectsN := func(bucket, prefix string, recursive bool, N int) (objsInfo []minio.ObjectInfo, err error) {
-		// Create a done channel to control 'ListObjects' go routine.
-		doneCh := make(chan struct{}, 1)
-
-		// Free the channel upon return.
-		defer close(doneCh)
-
-		i := 0
-		for object := range s3Client.ListObjects(bucket, prefix, recursive, doneCh) {
-			if object.Err != nil {
-				return nil, object.Err
-			}
-			objsInfo = append(objsInfo, object)
-			i++
-			// Once N objects have been collected, signal the
-			// ListObjects go-routine to exit and stop feeding
-			// the objectInfo channel.
-			if i == N {
-				doneCh <- struct{}{}
-				break
-			}
-		}
-		return objsInfo, nil
-	}
-
-	// Recursively list the first 100 entries under prefix 'my-prefixname'.
-	recursive := true
-	objsInfo, err := listObjectsN("my-bucketname", "my-prefixname", recursive, 100)
-	if err != nil {
-		fmt.Println(err)
-	}
-
-	// Print all the entries.
-	fmt.Println(objsInfo)
-}
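The counter-based early exit above relies on the same producer contract. A generic, self-contained sketch of how ListObjects-style generators honor doneCh (the produce helper is hypothetical, not part of minio-go):

    package main

    import "fmt"

    // produce feeds values into the returned channel until it runs out,
    // or until doneCh is closed, mirroring the contract that
    // ListObjects-style generators follow.
    func produce(values []int, doneCh <-chan struct{}) <-chan int {
        out := make(chan int)
        go func() {
            defer close(out)
            for _, v := range values {
                select {
                case out <- v:
                case <-doneCh:
                    return // the consumer asked us to stop
                }
            }
        }()
        return out
    }

    func main() {
        doneCh := make(chan struct{})
        defer close(doneCh) // stops the producer if we exit the loop early
        for v := range produce([]int{1, 2, 3, 4}, doneCh) {
            fmt.Println(v)
            if v == 2 {
                break
            }
        }
    }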

+ 0 - 58
vendor/github.com/minio/minio-go/examples/s3/listobjects.go

@@ -1,58 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"fmt"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
-	// are dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		fmt.Println(err)
-		return
-	}
-
-	// Create a done channel to control 'ListObjects' go routine.
-	doneCh := make(chan struct{})
-
-	// Signal the listing goroutine to exit cleanly when we return.
-	defer close(doneCh)
-
-	// List all objects from a bucket-name with a matching prefix.
-	for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) {
-		if object.Err != nil {
-			fmt.Println(object.Err)
-			return
-		}
-		fmt.Println(object)
-	}
-	return
-}

+ 0 - 58
vendor/github.com/minio/minio-go/examples/s3/listobjectsV2.go

@@ -1,58 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"fmt"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-prefixname
-	// are dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		fmt.Println(err)
-		return
-	}
-
-	// Create a done channel to control the 'ListObjectsV2' go routine.
-	doneCh := make(chan struct{})
-
-	// Signal the listing goroutine to exit cleanly when we return.
-	defer close(doneCh)
-
-	// List all objects from a bucket-name with a matching prefix.
-	for object := range s3Client.ListObjectsV2("my-bucketname", "my-prefixname", true, doneCh) {
-		if object.Err != nil {
-			fmt.Println(object.Err)
-			return
-		}
-		fmt.Println(object)
-	}
-	return
-}

+ 0 - 47
vendor/github.com/minio/minio-go/examples/s3/makebucket.go

@@ -1,47 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
-	// dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	err = s3Client.MakeBucket("my-bucketname", "us-east-1")
-	if err != nil {
-		log.Fatalln(err)
-	}
-	log.Println("Success")
-}
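When the bucket may already exist, a pre-check avoids treating BucketAlreadyOwnedByYou as a fatal error. A sketch, assuming the BucketExists(bucketName) (bool, error) signature of this generation of the library:

    package main

    import (
        "log"

        "github.com/minio/minio-go"
    )

    func main() {
        s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
        if err != nil {
            log.Fatalln(err)
        }

        // Only create the bucket when it is not already there.
        exists, err := s3Client.BucketExists("my-bucketname")
        if err != nil {
            log.Fatalln(err)
        }
        if !exists {
            if err = s3Client.MakeBucket("my-bucketname", "us-east-1"); err != nil {
                log.Fatalln(err)
            }
        }
        log.Println("Bucket is ready")
    }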

+ 0 - 54
vendor/github.com/minio/minio-go/examples/s3/presignedgetobject.go

@@ -1,54 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-	"net/url"
-	"time"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
-	// are dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// Set request parameters
-	reqParams := make(url.Values)
-	reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
-
-	// Generate presigned GET object URL.
-	presignedURL, err := s3Client.PresignedGetObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second, reqParams)
-	if err != nil {
-		log.Fatalln(err)
-	}
-	log.Println(presignedURL)
-}
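The returned URL is a plain HTTPS link that any unauthenticated client can fetch until it expires. A sketch of consuming it with net/http (the placeholder string stands in for the PresignedGetObject result):

    package main

    import (
        "io"
        "log"
        "net/http"
        "os"
    )

    func main() {
        presignedURL := "https://example.com/replace-with-presigned-url"
        resp, err := http.Get(presignedURL)
        if err != nil {
            log.Fatalln(err)
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            log.Fatalln("unexpected status:", resp.Status)
        }
        // Stream the object body to stdout.
        if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
            log.Fatalln(err)
        }
    }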

+ 0 - 54
vendor/github.com/minio/minio-go/examples/s3/presignedheadobject.go

@@ -1,54 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-	"net/url"
-	"time"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
-	// are dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// Set request parameters
-	reqParams := make(url.Values)
-	reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
-
-	// Generate presigned HEAD object URL.
-	presignedURL, err := s3Client.PresignedHeadObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second, reqParams)
-	if err != nil {
-		log.Fatalln(err)
-	}
-	log.Println(presignedURL)
-}
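A presigned HEAD URL is consumed the same way, except only headers come back. A brief sketch (the placeholder string stands in for the PresignedHeadObject result):

    package main

    import (
        "log"
        "net/http"
    )

    func main() {
        presignedURL := "https://example.com/replace-with-presigned-url"
        // HEAD returns object metadata without transferring the body.
        resp, err := http.Head(presignedURL)
        if err != nil {
            log.Fatalln(err)
        }
        resp.Body.Close()
        log.Println("size:", resp.ContentLength, "etag:", resp.Header.Get("ETag"))
    }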

+ 0 - 60
vendor/github.com/minio/minio-go/examples/s3/presignedpostpolicy.go

@@ -1,60 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"fmt"
-	"log"
-	"time"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
-	// are dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	policy := minio.NewPostPolicy()
-	policy.SetBucket("my-bucketname")
-	policy.SetKey("my-objectname")
-	// Expires in 10 days.
-	policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10))
-	// Returns form data for POST form request.
-	url, formData, err := s3Client.PresignedPostPolicy(policy)
-	if err != nil {
-		log.Fatalln(err)
-	}
-	fmt.Printf("curl ")
-	for k, v := range formData {
-		fmt.Printf("-F %s=%s ", k, v)
-	}
-	fmt.Printf("-F file=@/etc/bash.bashrc ")
-	fmt.Printf("%s\n", url)
-}
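The curl command printed above is one way to use the policy; programmatically, the upload is a multipart/form-data POST with the policy fields first and the file field last. A sketch using only the standard library (postUpload is a hypothetical helper; url and formData are assumed to come from PresignedPostPolicy):

    package main

    import (
        "bytes"
        "fmt"
        "log"
        "mime/multipart"
        "net/http"
    )

    // postUpload performs the browser-style upload that the generated curl
    // command illustrates: policy form fields first, the file field last.
    func postUpload(url string, formData map[string]string, contents []byte) error {
        var body bytes.Buffer
        w := multipart.NewWriter(&body)
        for k, v := range formData {
            if err := w.WriteField(k, v); err != nil {
                return err
            }
        }
        fw, err := w.CreateFormFile("file", "my-objectname")
        if err != nil {
            return err
        }
        if _, err = fw.Write(contents); err != nil {
            return err
        }
        if err = w.Close(); err != nil {
            return err
        }
        resp, err := http.Post(url, w.FormDataContentType(), &body)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        if resp.StatusCode/100 != 2 {
            return fmt.Errorf("unexpected status: %s", resp.Status)
        }
        return nil
    }

    func main() {
        if err := postUpload("https://example.com/replace-with-post-url",
            map[string]string{}, []byte("hello")); err != nil {
            log.Fatalln(err)
        }
    }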

+ 0 - 48
vendor/github.com/minio/minio-go/examples/s3/presignedputobject.go

@@ -1,48 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-	"time"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
-	// are dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	presignedURL, err := s3Client.PresignedPutObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second)
-	if err != nil {
-		log.Fatalln(err)
-	}
-	log.Println(presignedURL)
-}
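Uploading through the presigned URL is a plain HTTP PUT with the object bytes as the request body. A sketch (the placeholder string stands in for the PresignedPutObject result):

    package main

    import (
        "log"
        "net/http"
        "strings"
    )

    func main() {
        presignedURL := "https://example.com/replace-with-presigned-url"
        req, err := http.NewRequest(http.MethodPut, presignedURL, strings.NewReader("hello world"))
        if err != nil {
            log.Fatalln(err)
        }
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            log.Fatalln(err)
        }
        defer resp.Body.Close()
        log.Println("upload status:", resp.Status)
    }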

+ 0 - 85
vendor/github.com/minio/minio-go/examples/s3/put-encrypted-object.go

@@ -1,85 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-	"os"
-
-	"github.com/minio/minio-go"
-	"github.com/minio/minio-go/pkg/encrypt"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
-	// my-objectname are dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// Open a local file that we will upload
-	file, err := os.Open("my-testfile")
-	if err != nil {
-		log.Fatalln(err)
-	}
-	defer file.Close()
-
-	//// Build an asymmetric key from private and public files
-	//
-	// privateKey, err := ioutil.ReadFile("private.key")
-	// if err != nil {
-	//	t.Fatal(err)
-	// }
-	//
-	// publicKey, err := ioutil.ReadFile("public.key")
-	// if err != nil {
-	//	t.Fatal(err)
-	// }
-	//
-	// asymmetricKey, err := NewAsymmetricKey(privateKey, publicKey)
-	// if err != nil {
-	//	t.Fatal(err)
-	// }
-	////
-
-	// Build a symmetric key
-	symmetricKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
-
-	// Build encryption materials which will encrypt uploaded data
-	cbcMaterials, err := encrypt.NewCBCSecureMaterials(symmetricKey)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// Encrypt file content and upload to the server
-	n, err := s3Client.PutEncryptedObject("my-bucketname", "my-objectname", file, cbcMaterials)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	log.Println("Successfully uploaded my-objectname of size", n)
-}
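Downloading requires the same key material that was used for the upload. A sketch of the matching read path, assuming this generation's GetEncryptedObject(bucketName, objectName, materials) helper returning an io.ReadCloser:

    package main

    import (
        "io"
        "log"
        "os"

        "github.com/minio/minio-go"
        "github.com/minio/minio-go/pkg/encrypt"
    )

    func main() {
        s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
        if err != nil {
            log.Fatalln(err)
        }

        // The same symmetric key used for upload is needed for download.
        symmetricKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
        cbcMaterials, err := encrypt.NewCBCSecureMaterials(symmetricKey)
        if err != nil {
            log.Fatalln(err)
        }

        reader, err := s3Client.GetEncryptedObject("my-bucketname", "my-objectname", cbcMaterials)
        if err != nil {
            log.Fatalln(err)
        }
        defer reader.Close()

        // Stream the decrypted content to stdout.
        if _, err := io.Copy(os.Stdout, reader); err != nil {
            log.Fatalln(err)
        }
    }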

+ 0 - 68
vendor/github.com/minio/minio-go/examples/s3/putobject-context.go

@@ -1,68 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"context"
-	"log"
-	"os"
-	"time"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
-	// my-objectname are dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
-	defer cancel()
-
-	object, err := os.Open("my-testfile")
-	if err != nil {
-		log.Fatalln(err)
-	}
-	defer object.Close()
-
-	objectStat, err := object.Stat()
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	n, err := s3Client.PutObjectWithContext(ctx, "my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{
-		ContentType: "application/octet-stream",
-	})
-	if err != nil {
-		log.Fatalln(err)
-	}
-	log.Println("Successfully uploaded my-objectname of size", n)
-}
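The ten-minute timeout above is one cancellation source; wiring an interrupt signal to the same context aborts an in-flight upload on Ctrl-C as well. A minimal, standard-library-only sketch:

    package main

    import (
        "context"
        "log"
        "os"
        "os/signal"
    )

    func main() {
        // Derive a context that is cancelled on Ctrl-C so an in-flight
        // PutObjectWithContext call aborts instead of running to completion.
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        sigCh := make(chan os.Signal, 1)
        signal.Notify(sigCh, os.Interrupt)
        go func() {
            <-sigCh
            cancel()
        }()

        // ctx would be passed to PutObjectWithContext as in the example above;
        // here we just block until the signal arrives.
        <-ctx.Done()
        log.Println("cancelled:", ctx.Err())
    }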

+ 0 - 87
vendor/github.com/minio/minio-go/examples/s3/putobject-getobject-sse.go

@@ -1,87 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"bytes"
-	"crypto/md5"
-	"encoding/base64"
-	"io/ioutil"
-	"log"
-
-	minio "github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
-	// my-objectname are dummy values; please replace them with your actual values.
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	minioClient, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	content := bytes.NewReader([]byte("Hello again"))
-	key := []byte("32byteslongsecretkeymustprovided")
-	h := md5.New()
-	h.Write(key)
-	encryptionKey := base64.StdEncoding.EncodeToString(key)
-	encryptionKeyMD5 := base64.StdEncoding.EncodeToString(h.Sum(nil))
-
-	// Amazon S3 does not store the encryption key you provide.
-	// Instead S3 stores a randomly salted HMAC value of the
-	// encryption key in order to validate future requests.
-	// The salted HMAC value cannot be used to derive the value
-	// of the encryption key or to decrypt the contents of the
-	// encrypted object. That means, if you lose the encryption
-	// key, you lose the object.
-	var metadata = map[string]string{
-		"x-amz-server-side-encryption-customer-algorithm": "AES256",
-		"x-amz-server-side-encryption-customer-key":       encryptionKey,
-		"x-amz-server-side-encryption-customer-key-MD5":   encryptionKeyMD5,
-	}
-
-	// minioClient.TraceOn(os.Stderr) // Enable to debug.
-	_, err = minioClient.PutObject("mybucket", "my-encrypted-object.txt", content, 11, minio.PutObjectOptions{UserMetadata: metadata})
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	opts := minio.GetObjectOptions{}
-	for k, v := range metadata {
-		opts.Set(k, v)
-	}
-	coreClient := minio.Core{Client: minioClient}
-	reader, _, err := coreClient.GetObject("mybucket", "my-encrypted-object.txt", opts)
-	if err != nil {
-		log.Fatalln(err)
-	}
-	defer reader.Close()
-
-	decBytes, err := ioutil.ReadAll(reader)
-	if err != nil {
-		log.Fatalln(err)
-	}
-	if !bytes.Equal(decBytes, []byte("Hello again")) {
-		log.Fatalf("Expected %q, got %q", "Hello again", string(decBytes))
-	}
-}

+ 0 - 64
vendor/github.com/minio/minio-go/examples/s3/putobject-progress.go

@@ -1,64 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/cheggaaa/pb"
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
-	// my-objectname are dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	reader, err := s3Client.GetObject("my-bucketname", "my-objectname", minio.GetObjectOptions{})
-	if err != nil {
-		log.Fatalln(err)
-	}
-	defer reader.Close()
-
-	objectInfo, err := reader.Stat()
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// The progress reader is notified as PutObject reads the data
-	// through it.
-	progress := pb.New64(objectInfo.Size)
-	progress.Start()
-	n, err := s3Client.PutObject("my-bucketname", "my-objectname-progress", reader, objectInfo.Size, minio.PutObjectOptions{ContentType: "application/octet-stream", Progress: progress})
-
-	if err != nil {
-		log.Fatalln(err)
-	}
-	log.Println("Successfully uploaded my-objectname-progress of size", n)
-}

+ 0 - 62
vendor/github.com/minio/minio-go/examples/s3/putobject-s3-accelerate.go

@@ -1,62 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-	"os"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
-	// my-objectname are dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// Enable S3 transfer accelerate endpoint.
-	s3Client.SetS3TransferAccelerate("s3-accelerate.amazonaws.com")
-
-	object, err := os.Open("my-testfile")
-	if err != nil {
-		log.Fatalln(err)
-	}
-	defer object.Close()
-
-	objectStat, err := object.Stat()
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
-	if err != nil {
-		log.Fatalln(err)
-	}
-	log.Println("Successfully uploaded my-objectname of size", n)
-}

+ 0 - 55
vendor/github.com/minio/minio-go/examples/s3/putobject-streaming.go

@@ -1,55 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-	"os"
-
-	minio "github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
-	// my-objectname are dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	object, err := os.Open("my-testfile")
-	if err != nil {
-		log.Fatalln(err)
-	}
-	defer object.Close()
-
-	n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, -1, minio.PutObjectOptions{})
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	log.Println("Successfully uploaded my-objectname of size", n)
-}
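Passing -1 as the size is what selects the streaming multipart path, so the reader need not be a file at all. A sketch feeding data produced on the fly through an io.Pipe (copying stdin is an arbitrary stand-in for any producer):

    package main

    import (
        "io"
        "log"
        "os"

        minio "github.com/minio/minio-go"
    )

    func main() {
        s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
        if err != nil {
            log.Fatalln(err)
        }

        // The write half is fed by a producer goroutine; the read half
        // is handed to PutObject with an unknown (-1) size.
        pr, pw := io.Pipe()
        go func() {
            _, copyErr := io.Copy(pw, os.Stdin)
            pw.CloseWithError(copyErr) // EOF on nil, error otherwise
        }()

        n, err := s3Client.PutObject("my-bucketname", "my-objectname", pr, -1, minio.PutObjectOptions{})
        if err != nil {
            log.Fatalln(err)
        }
        log.Println("streamed", n, "bytes")
    }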

+ 0 - 58
vendor/github.com/minio/minio-go/examples/s3/putobject.go

@@ -1,58 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-	"os"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
-	// my-objectname are dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	object, err := os.Open("my-testfile")
-	if err != nil {
-		log.Fatalln(err)
-	}
-	defer object.Close()
-	objectStat, err := object.Stat()
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
-	if err != nil {
-		log.Fatalln(err)
-	}
-	log.Println("Successfully uploaded my-objectname of size", n)
-}

+ 0 - 50
vendor/github.com/minio/minio-go/examples/s3/removeallbucketnotification.go

@@ -1,50 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
-	// dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// s3Client.TraceOn(os.Stderr)
-
-	err = s3Client.RemoveAllBucketNotification("my-bucketname")
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	log.Println("Bucket notifications were removed successfully.")
-}

+ 0 - 49
vendor/github.com/minio/minio-go/examples/s3/removebucket.go

@@ -1,49 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
-	// dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// This operation will only work if your bucket is empty.
-	err = s3Client.RemoveBucket("my-bucketname")
-	if err != nil {
-		log.Fatalln(err)
-	}
-	log.Println("Success")
-}

+ 0 - 47
vendor/github.com/minio/minio-go/examples/s3/removeincompleteupload.go

@@ -1,47 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
-	// are dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	err = s3Client.RemoveIncompleteUpload("my-bucketname", "my-objectname")
-	if err != nil {
-		log.Fatalln(err)
-	}
-	log.Println("Success")
-}

+ 0 - 46
vendor/github.com/minio/minio-go/examples/s3/removeobject.go

@@ -1,46 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
-	// are dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-	err = s3Client.RemoveObject("my-bucketname", "my-objectname")
-	if err != nil {
-		log.Fatalln(err)
-	}
-	log.Println("Success")
-}

+ 0 - 65
vendor/github.com/minio/minio-go/examples/s3/removeobjects.go

@@ -1,65 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
-	// are dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// Create a done channel to control the 'ListObjects' go-routine;
-	// without it the listing below would not compile.
-	doneCh := make(chan struct{})
-	defer close(doneCh)
-
-	objectsCh := make(chan string)
-
-	// Send the names of the objects to be removed to objectsCh.
-	go func() {
-		defer close(objectsCh)
-		// List all objects from a bucket-name with a matching prefix.
-		for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) {
-			if object.Err != nil {
-				log.Fatalln(object.Err)
-			}
-			objectsCh <- object.Key
-		}
-	}()
-
-	// Call RemoveObjects API
-	errorCh := s3Client.RemoveObjects("my-bucketname", objectsCh)
-
-	// Exit on the first error received from the RemoveObjects API.
-	for e := range errorCh {
-		log.Fatalln("Failed to remove " + e.ObjectName + ", error: " + e.Err.Error())
-	}
-
-	log.Println("Success")
-}
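Since the error channel is closed once all deletions have been attempted, draining it fully reports every failure rather than exiting on the first. A sketch (the three literal object names are placeholders):

    package main

    import (
        "log"

        "github.com/minio/minio-go"
    )

    func main() {
        s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
        if err != nil {
            log.Fatalln(err)
        }

        objectsCh := make(chan string, 3)
        objectsCh <- "obj-1"
        objectsCh <- "obj-2"
        objectsCh <- "obj-3"
        close(objectsCh)

        // Drain the error channel completely so every failed deletion
        // is reported, not just the first.
        failed := 0
        for e := range s3Client.RemoveObjects("my-bucketname", objectsCh) {
            failed++
            log.Println("failed to remove", e.ObjectName+":", e.Err)
        }
        if failed > 0 {
            log.Fatalln(failed, "objects could not be removed")
        }
        log.Println("Success")
    }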

+ 0 - 86
vendor/github.com/minio/minio-go/examples/s3/setbucketnotification.go

@@ -1,86 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
-	// dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// s3Client.TraceOn(os.Stderr)
-
-	// ARN represents a notification channel that needs to be created in your S3 provider
-	//  (e.g. http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html)
-
-	// An example of an ARN:
-	//             arn:aws:sns:us-east-1:804064459714:UploadPhoto
-	//                  ^   ^     ^           ^          ^
-	//       Provider __|   |     |           |          |
-	//                      |   Region    Account ID     |_ Notification Name
-	//             Service _|
-	//
-	// You should replace YOUR-PROVIDER, YOUR-SERVICE, YOUR-REGION, YOUR-ACCOUNT-ID and YOUR-RESOURCE
-	// with actual values that you receive from the S3 provider
-
-	// Here you create a new Topic notification
-	topicArn := minio.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE")
-	topicConfig := minio.NewNotificationConfig(topicArn)
-	topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
-	topicConfig.AddFilterPrefix("photos/")
-	topicConfig.AddFilterSuffix(".jpg")
-
-	// Create a new Queue notification
-	queueArn := minio.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE")
-	queueConfig := minio.NewNotificationConfig(queueArn)
-	queueConfig.AddEvents(minio.ObjectRemovedAll)
-
-	// Create a new Lambda (CloudFunction)
-	lambdaArn := minio.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE")
-	lambdaConfig := minio.NewNotificationConfig(lambdaArn)
-	lambdaConfig.AddEvents(minio.ObjectRemovedAll)
-	lambdaConfig.AddFilterSuffix(".swp")
-
-	// Now, set all previously created notification configs
-	bucketNotification := minio.BucketNotification{}
-	bucketNotification.AddTopic(topicConfig)
-	bucketNotification.AddQueue(queueConfig)
-	bucketNotification.AddLambda(lambdaConfig)
-
-	err = s3Client.SetBucketNotification("YOUR-BUCKET", bucketNotification)
-	if err != nil {
-		log.Fatalln("Error: " + err.Error())
-	}
-	log.Println("Success")
-}
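Once configured, events can also be consumed directly over HTTP on servers that support it. A sketch, assuming this generation's ListenBucketNotification(bucketName, prefix, suffix, events, doneCh) signature and NotificationInfo record layout:

    package main

    import (
        "log"

        "github.com/minio/minio-go"
    )

    func main() {
        s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
        if err != nil {
            log.Fatalln(err)
        }

        doneCh := make(chan struct{})
        defer close(doneCh)

        // Receive events matching the configured notification; each
        // NotificationInfo carries the S3 event records.
        for info := range s3Client.ListenBucketNotification("YOUR-BUCKET", "photos/", ".jpg",
            []string{"s3:ObjectCreated:*"}, doneCh) {
            if info.Err != nil {
                log.Fatalln(info.Err)
            }
            for _, record := range info.Records {
                log.Println("event:", record.EventName, "key:", record.S3.Object.Key)
            }
        }
    }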

+ 0 - 55
vendor/github.com/minio/minio-go/examples/s3/setbucketpolicy.go

@@ -1,55 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-	"github.com/minio/minio-go/pkg/policy"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
-	// dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// s3Client.TraceOn(os.Stderr)
-
-	// Description of policy input.
-	// policy.BucketPolicyNone - Remove any previously applied bucket policy at a prefix.
-	// policy.BucketPolicyReadOnly - Set read-only operations at a prefix.
-	// policy.BucketPolicyWriteOnly - Set write-only operations at a prefix.
-	// policy.BucketPolicyReadWrite - Set read-write operations at a prefix.
-	err = s3Client.SetBucketPolicy("my-bucketname", "my-objectprefix", policy.BucketPolicyReadWrite)
-	if err != nil {
-		log.Fatalln(err)
-	}
-	log.Println("Success")
-}
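Reading the policy back confirms the change took effect. A sketch, assuming this generation's GetBucketPolicy(bucketName, objectPrefix) returning a policy.BucketPolicy:

    package main

    import (
        "log"

        "github.com/minio/minio-go"
    )

    func main() {
        s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
        if err != nil {
            log.Fatalln(err)
        }

        // Read the effective policy back at the same prefix.
        bucketPolicy, err := s3Client.GetBucketPolicy("my-bucketname", "my-objectprefix")
        if err != nil {
            log.Fatalln(err)
        }
        log.Println("policy at prefix:", bucketPolicy)
    }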

+ 0 - 46
vendor/github.com/minio/minio-go/examples/s3/statobject.go

@@ -1,46 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-
-	"github.com/minio/minio-go"
-)
-
-func main() {
-	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
-	// are dummy values; please replace them with your actual values.
-
-	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
-	// This boolean value is the last argument for New().
-
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
-	// determined based on the Endpoint value.
-	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
-	if err != nil {
-		log.Fatalln(err)
-	}
-	stat, err := s3Client.StatObject("my-bucketname", "my-objectname", minio.StatObjectOptions{})
-	if err != nil {
-		log.Fatalln(err)
-	}
-	log.Println(stat)
-}

+ 0 - 6939
vendor/github.com/minio/minio-go/functional_tests.go

@@ -1,6939 +0,0 @@
-// +build ignore
-
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"bytes"
-	"context"
-	"encoding/hex"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"math/rand"
-	"mime/multipart"
-	"net/http"
-	"net/url"
-	"os"
-	"path/filepath"
-	"reflect"
-	"runtime"
-	"strconv"
-	"strings"
-	"time"
-
-	humanize "github.com/dustin/go-humanize"
-	minio "github.com/minio/minio-go"
-	log "github.com/sirupsen/logrus"
-
-	"github.com/minio/minio-go/pkg/encrypt"
-	"github.com/minio/minio-go/pkg/policy"
-)
-
-const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
-const (
-	letterIdxBits = 6                    // 6 bits to represent a letter index
-	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
-	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
-)
-const (
-	serverEndpoint = "SERVER_ENDPOINT"
-	accessKey      = "ACCESS_KEY"
-	secretKey      = "SECRET_KEY"
-	enableHTTPS    = "ENABLE_HTTPS"
-)
-
-type mintJSONFormatter struct {
-}
-
-func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
-	data := make(log.Fields, len(entry.Data))
-	for k, v := range entry.Data {
-		switch v := v.(type) {
-		case error:
-			// Otherwise errors are ignored by `encoding/json`
-			// https://github.com/sirupsen/logrus/issues/137
-			data[k] = v.Error()
-		default:
-			data[k] = v
-		}
-	}
-
-	serialized, err := json.Marshal(data)
-	if err != nil {
-		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
-	}
-	return append(serialized, '\n'), nil
-}
-
-func cleanEmptyEntries(fields log.Fields) log.Fields {
-	cleanFields := log.Fields{}
-	for k, v := range fields {
-		if v != "" {
-			cleanFields[k] = v
-		}
-	}
-	return cleanFields
-}
-
-// log successful test runs
-func successLogger(testName string, function string, args map[string]interface{}, startTime time.Time) *log.Entry {
-	// calculate the test case duration
-	duration := time.Since(startTime)
-	// log with the fields as per mint
-	fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": "PASS"}
-	return log.WithFields(cleanEmptyEntries(fields))
-}
-
-// As a few features are currently not available in Gateway(s), check whether the err value is NotImplemented;
-// in that case log the test as NA and continue execution. Otherwise log it as a failure and return.
-func logError(testName string, function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) {
-	// If server returns NotImplemented we assume it is gateway mode and hence log it as info and move on to next tests
-	// Special case for ComposeObject API as it is implemented on client side and adds specific error details like `Error in upload-part-copy` in
-	// addition to NotImplemented error returned from server
-	if isErrNotImplemented(err) {
-		ignoredLog(testName, function, args, startTime, message).Info()
-	} else {
-		failureLog(testName, function, args, startTime, alert, message, err).Fatal()
-	}
-}
-
-// log failed test runs
-func failureLog(testName string, function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry {
-	// calculate the test case duration
-	duration := time.Since(startTime)
-	var fields log.Fields
-	// log with the fields as per mint
-	if err != nil {
-		fields = log.Fields{"name": "minio-go: " + testName, "function": function, "args": args,
-			"duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, "error": err}
-	} else {
-		fields = log.Fields{"name": "minio-go: " + testName, "function": function, "args": args,
-			"duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message}
-	}
-	return log.WithFields(cleanEmptyEntries(fields))
-}
-
-// log not applicable test runs
-func ignoredLog(testName string, function string, args map[string]interface{}, startTime time.Time, alert string) *log.Entry {
-	// calculate the test case duration
-	duration := time.Since(startTime)
-	// log with the fields as per mint
-	fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args,
-		"duration": duration.Nanoseconds() / 1000000, "status": "NA", "alert": alert}
-	return log.WithFields(cleanEmptyEntries(fields))
-}
-
-// Delete objects in given bucket, recursively
-func cleanupBucket(bucketName string, c *minio.Client) error {
-	// Create a done channel to control 'ListObjectsV2' go routine.
-	doneCh := make(chan struct{})
-	// Exit cleanly upon return.
-	defer close(doneCh)
-	// Iterate over all objects in the bucket via listObjectsV2 and delete
-	for objCh := range c.ListObjectsV2(bucketName, "", true, doneCh) {
-		if objCh.Err != nil {
-			return objCh.Err
-		}
-		if objCh.Key != "" {
-			err := c.RemoveObject(bucketName, objCh.Key)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	for objPartInfo := range c.ListIncompleteUploads(bucketName, "", true, doneCh) {
-		if objPartInfo.Err != nil {
-			return objPartInfo.Err
-		}
-		if objPartInfo.Key != "" {
-			err := c.RemoveIncompleteUpload(bucketName, objPartInfo.Key)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	// objects are already deleted, clear the buckets now
-	err := c.RemoveBucket(bucketName)
-	if err != nil {
-		return err
-	}
-	return err
-}
-
-func isErrNotImplemented(err error) bool {
-	return minio.ToErrorResponse(err).Code == "NotImplemented"
-}
-
-func init() {
-	// If server endpoint is not set, all tests default to
-	// using https://play.minio.io:9000
-	if os.Getenv(serverEndpoint) == "" {
-		os.Setenv(serverEndpoint, "play.minio.io:9000")
-		os.Setenv(accessKey, "Q3AM3UQ867SPQQA43P2F")
-		os.Setenv(secretKey, "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG")
-		os.Setenv(enableHTTPS, "1")
-	}
-}
-
-var mintDataDir = os.Getenv("MINT_DATA_DIR")
-
-func getMintDataDirFilePath(filename string) (fp string) {
-	if mintDataDir == "" {
-		return
-	}
-	return filepath.Join(mintDataDir, filename)
-}
-
-type sizedReader struct {
-	io.Reader
-	size int
-}
-
-func (l *sizedReader) Size() int {
-	return l.size
-}
-
-func (l *sizedReader) Close() error {
-	return nil
-}
-
-type randomReader struct{ seed []byte }
-
-func (r *randomReader) Read(b []byte) (int, error) {
-	return copy(b, bytes.Repeat(r.seed, len(b))), nil
-}
-
-// Read data from the file if it exists; otherwise return a generated reader of the required size.
-func getDataReader(fileName string) io.ReadCloser {
-	if mintDataDir == "" {
-		size := dataFileMap[fileName]
-		return &sizedReader{
-			Reader: io.LimitReader(&randomReader{
-				seed: []byte("a"),
-			}, int64(size)),
-			size: size,
-		}
-	}
-	reader, _ := os.Open(getMintDataDirFilePath(fileName))
-	return reader
-}
-
-// randString generates random names and prepends them with a known prefix.
-func randString(n int, src rand.Source, prefix string) string {
-	b := make([]byte, n)
-	// A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
-	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
-		if remain == 0 {
-			cache, remain = src.Int63(), letterIdxMax
-		}
-		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
-			b[i] = letterBytes[idx]
-			i--
-		}
-		cache >>= letterIdxBits
-		remain--
-	}
-	return prefix + string(b[0:30-len(prefix)])
-}
-
-var dataFileMap = map[string]int{
-	"datafile-1-b":     1,
-	"datafile-10-kB":   10 * humanize.KiByte,
-	"datafile-33-kB":   33 * humanize.KiByte,
-	"datafile-100-kB":  100 * humanize.KiByte,
-	"datafile-1.03-MB": 1056 * humanize.KiByte,
-	"datafile-1-MB":    1 * humanize.MiByte,
-	"datafile-5-MB":    5 * humanize.MiByte,
-	"datafile-6-MB":    6 * humanize.MiByte,
-	"datafile-11-MB":   11 * humanize.MiByte,
-	"datafile-65-MB":   65 * humanize.MiByte,
-}
-
-func isQuickMode() bool {
-	return os.Getenv("MODE") == "quick"
-}
-
-func getFuncName() string {
-	pc, _, _, _ := runtime.Caller(1)
-	return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.")
-}
-
-// Tests bucket re-create errors.
-func testMakeBucketError() {
-	region := "eu-central-1"
-
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "MakeBucket(bucketName, region)"
-	args := map[string]interface{}{
-		"bucketName": "",
-		"region":     region,
-	}
-
-	// skipping region functional tests for non s3 runs
-	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
-		ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
-		return
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket in 'eu-central-1'.
-	if err = c.MakeBucket(bucketName, region); err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket Failed", err)
-		return
-	}
-	if err = c.MakeBucket(bucketName, region); err == nil {
-		logError(testName, function, args, startTime, "", "Bucket already exists", err)
-		return
-	}
-	// Verify valid error response from server.
-	if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
-		minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
-		logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
-		return
-	}
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-	successLogger(testName, function, args, startTime).Info()
-}
-
-func testMetadataSizeLimit() {
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
-	args := map[string]interface{}{
-		"bucketName":        "",
-		"objectName":        "",
-		"opts.UserMetadata": "",
-	}
-	rand.Seed(startTime.Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client creation failed", err)
-		return
-	}
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Make bucket failed", err)
-		return
-	}
-
-	const HeaderSizeLimit = 8 * 1024
-	const UserMetadataLimit = 2 * 1024
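-
-	// Clarifying note (not in the original): AWS counts the metadata key as
-	// well as the value towards the user-metadata cap, so the value length
-	// 1+UserMetadataLimit-len(key) chosen below lands exactly one byte over it.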
-
-	// Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail
-	metadata := make(map[string]string)
-	metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test")))
-	args["metadata"] = fmt.Sprint(metadata)
-
-	_, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata})
-	if err == nil {
-		logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil)
-		return
-	}
-
-	// Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail
-	metadata = make(map[string]string)
-	metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test")))
-	args["metadata"] = fmt.Sprint(metadata)
-	_, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata})
-	if err == nil {
-		logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests various bucket supported formats.
-func testMakeBucketRegions() {
-	region := "eu-central-1"
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "MakeBucket(bucketName, region)"
-	args := map[string]interface{}{
-		"bucketName": "",
-		"region":     region,
-	}
-
-	// Skip region functional tests for non-S3 runs.
-	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
-		ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non-S3 runs").Info()
-		return
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket in 'eu-central-1'.
-	if err = c.MakeBucket(bucketName, region); err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	// Make a new bucket with '.' in its name, in 'us-west-2'. This
-	// request is internally staged into a path style instead of
-	// virtual host style.
-	region = "us-west-2"
-	args["region"] = region
-	if err = c.MakeBucket(bucketName+".withperiod", region); err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test PutObject using large data to trigger a multipart ReadAt upload.
-func testPutObjectReadAt() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "PutObject(bucketName, objectName, reader, opts)"
-	args := map[string]interface{}{
-		"bucketName": "",
-		"objectName": "",
-		"opts":       "objectContentType",
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Make bucket failed", err)
-		return
-	}
-
-	bufSize := dataFileMap["datafile-65-MB"]
-	var reader = getDataReader("datafile-65-MB")
-	defer reader.Close()
-
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	// Object content type
-	objectContentType := "binary/octet-stream"
-	args["objectContentType"] = objectContentType
-
-	n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	if n != int64(bufSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes returned by PutObject does not match, expected %d got %d", bufSize, n), err)
-		return
-	}
-
-	// Read the data back
-	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Get Object failed", err)
-		return
-	}
-
-	st, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat Object failed", err)
-		return
-	}
-	if st.Size != int64(bufSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
-		return
-	}
-	if st.ContentType != objectContentType {
-		logError(testName, function, args, startTime, "", "Content types don't match", err)
-		return
-	}
-	if err := r.Close(); err != nil {
-		logError(testName, function, args, startTime, "", "Object Close failed", err)
-		return
-	}
-	if err := r.Close(); err == nil {
-		logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test PutObject on large data with a custom content type.
-func testPutObjectWithMetadata() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "PutObject(bucketName, objectName, reader,size, opts)"
-	args := map[string]interface{}{
-		"bucketName": "",
-		"objectName": "",
-		"opts":       "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
-	}
-
-	if isQuickMode() {
-		ignoredLog(testName, function, args, startTime, "Skipping functional tests for short runs").Info()
-		return
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Make bucket failed", err)
-		return
-	}
-
-	bufSize := dataFileMap["datafile-65-MB"]
-	var reader = getDataReader("datafile-65-MB")
-	defer reader.Close()
-
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	// Object custom metadata
-	customContentType := "custom/contenttype"
-
-	args["metadata"] = map[string][]string{
-		"Content-Type": {customContentType},
-	}
-
-	n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{
-		ContentType: customContentType})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	if n != int64(bufSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes returned by PutObject does not match, expected %d got %d", bufSize, n), err)
-		return
-	}
-
-	// Read the data back
-	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-
-	st, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-	if st.Size != int64(bufSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes returned by PutObject does not match GetObject, expected %d got %d", bufSize, st.Size), err)
-		return
-	}
-	if st.ContentType != customContentType {
-		logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err)
-		return
-	}
-	if err := r.Close(); err != nil {
-		logError(testName, function, args, startTime, "", "Object Close failed", err)
-		return
-	}
-	if err := r.Close(); err == nil {
-		logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test put object with streaming signature.
-func testPutObjectStreaming() {
-	// initialize logging params
-	objectName := "test-object"
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "PutObject(bucketName, objectName, reader,size,opts)"
-	args := map[string]interface{}{
-		"bucketName": "",
-		"objectName": objectName,
-		"size":       -1,
-		"opts":       "",
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Upload an object.
-	sizes := []int64{0, 64*1024 - 1, 64 * 1024}
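-	// Clarifying note (not in the original): the sizes straddle a 64 KiB
-	// boundary (zero, one byte below it, exactly at it), presumably to
-	// exercise edge cases of the streaming-signature chunking.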
-
-	for _, size := range sizes {
-		data := bytes.Repeat([]byte("a"), int(size))
-		n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), size, minio.PutObjectOptions{})
-		if err != nil {
-			logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
-			return
-		}
-
-		if n != size {
-			logError(testName, function, args, startTime, "", fmt.Sprintf("PutObject returned %d bytes, expected %d", n, size), err)
-			return
-		}
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test listing partially uploaded objects.
-func testListPartiallyUploaded() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
-	args := map[string]interface{}{
-		"bucketName":  "",
-		"objectName":  "",
-		"isRecursive": "",
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	bufSize := dataFileMap["datafile-65-MB"]
-	r := bytes.NewReader(bytes.Repeat([]byte("0"), bufSize*2))
-
-	reader, writer := io.Pipe()
-	go func() {
-		i := 0
-		for i < 25 {
-			_, cerr := io.CopyN(writer, r, (int64(bufSize)*2)/25)
-			if cerr != nil {
-				logError(testName, function, args, startTime, "", "Copy failed", cerr)
-				return
-			}
-			i++
-			r.Seek(0, 0)
-		}
-		writer.CloseWithError(errors.New("proactively closed to be verified later"))
-	}()
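-
-	// Clarifying note (not in the original): CloseWithError makes every
-	// subsequent read from the pipe return that error, which the PutObject
-	// call below is expected to surface.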
-
-	objectName := bucketName + "-resumable"
-	args["objectName"] = objectName
-
-	_, err = c.PutObject(bucketName, objectName, reader, int64(bufSize*2), minio.PutObjectOptions{ContentType: "application/octet-stream"})
-	if err == nil {
-		logError(testName, function, args, startTime, "", "PutObject should fail", err)
-		return
-	}
-	if !strings.Contains(err.Error(), "proactively closed to be verified later") {
-		logError(testName, function, args, startTime, "", "Expected error string not found in PutObject error", err)
-		return
-	}
-
-	doneCh := make(chan struct{})
-	defer close(doneCh)
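-	// Clarifying note (not in the original): closing doneCh (deferred above)
-	// signals ListIncompleteUploads to stop streaming results.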
-	isRecursive := true
-	args["isRecursive"] = isRecursive
-
-	multiPartObjectCh := c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)
-	for multiPartObject := range multiPartObjectCh {
-		if multiPartObject.Err != nil {
-			logError(testName, function, args, startTime, "", "Multipart object error", multiPartObject.Err)
-			return
-		}
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test get object seeker from the end, using whence set to '2'.
-func testGetObjectSeekEnd() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "GetObject(bucketName, objectName)"
-	args := map[string]interface{}{}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Generate 33K of data.
-	bufSize := dataFileMap["datafile-33-kB"]
-	var reader = getDataReader("datafile-33-kB")
-	defer reader.Close()
-
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	buf, err := ioutil.ReadAll(reader)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll failed", err)
-		return
-	}
-
-	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	if n != int64(bufSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes written does not match, expected %d got %d", bufSize, n), err)
-		return
-	}
-
-	// Read the data back
-	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-
-	st, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-
-	if st.Size != int64(bufSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
-		return
-	}
-
-	pos, err := r.Seek(-100, 2)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Object Seek failed", err)
-		return
-	}
-	if pos != st.Size-100 {
-		logError(testName, function, args, startTime, "", "Incorrect position", err)
-		return
-	}
-	buf2 := make([]byte, 100)
-	m, err := io.ReadFull(r, buf2)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Error reading through io.ReadFull", err)
-		return
-	}
-	if m != len(buf2) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes don't match, expected %d got %d", len(buf2), m), err)
-		return
-	}
-	hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:])
-	hexBuf2 := fmt.Sprintf("%02x", buf2[:m])
-	if hexBuf1 != hexBuf2 {
-		logError(testName, function, args, startTime, "", "Values at same index don't match", err)
-		return
-	}
-	pos, err = r.Seek(-100, 2)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Object Seek failed", err)
-		return
-	}
-	if pos != st.Size-100 {
-		logError(testName, function, args, startTime, "", "Incorrect position", err)
-		return
-	}
-	if err = r.Close(); err != nil {
-		logError(testName, function, args, startTime, "", "ObjectClose failed", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test that the second Close on an already-closed get object reader returns an error.
-func testGetObjectClosedTwice() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "GetObject(bucketName, objectName)"
-	args := map[string]interface{}{}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Generate 33K of data.
-	bufSize := dataFileMap["datafile-33-kB"]
-	var reader = getDataReader("datafile-33-kB")
-	defer reader.Close()
-
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	if n != int64(bufSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("PutObject response doesn't match sent bytes, expected %d got %d", bufSize, n), err)
-		return
-	}
-
-	// Read the data back
-	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-
-	st, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-	if st.Size != int64(bufSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
-		return
-	}
-	if err := r.Close(); err != nil {
-		logError(testName, function, args, startTime, "", "Object Close failed", err)
-		return
-	}
-	if err := r.Close(); err == nil {
-		logError(testName, function, args, startTime, "", "Already closed object. No error returned", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test removing multiple objects with Remove API
-func testRemoveMultipleObjects() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "RemoveObjects(bucketName, objectsCh)"
-	args := map[string]interface{}{
-		"bucketName": "",
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
-
-	// Multi remove of 200 objects
-	nrObjects := 200
-
-	objectsCh := make(chan string)
-
-	go func() {
-		defer close(objectsCh)
-		// Upload objects and send them to objectsCh
-		for i := 0; i < nrObjects; i++ {
-			objectName := "sample" + strconv.Itoa(i) + ".txt"
-			_, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"})
-			if err != nil {
-				logError(testName, function, args, startTime, "", "PutObject failed", err)
-				continue
-			}
-			objectsCh <- objectName
-		}
-	}()
-
-	// Call RemoveObjects API
-	errorCh := c.RemoveObjects(bucketName, objectsCh)
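-
-	// Clarifying note (not in the original): RemoveObjects consumes names
-	// from objectsCh and streams any per-object failures back on errorCh,
-	// closing errorCh once all removals have finished.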
-
-	// Check that errorCh does not receive any error.
-	if r, more := <-errorCh; more {
-		logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests removing partially uploaded objects.
-func testRemovePartiallyUploaded() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "RemoveIncompleteUpload(bucketName, objectName)"
-	args := map[string]interface{}{}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
-
-	reader, writer := io.Pipe()
-	go func() {
-		i := 0
-		for i < 25 {
-			_, cerr := io.CopyN(writer, r, 128*1024)
-			if cerr != nil {
-				logError(testName, function, args, startTime, "", "Copy failed", cerr)
-				return
-			}
-			i++
-			r.Seek(0, 0)
-		}
-		writer.CloseWithError(errors.New("proactively closed to be verified later"))
-	}()
-
-	objectName := bucketName + "-resumable"
-	args["objectName"] = objectName
-
-	_, err = c.PutObject(bucketName, objectName, reader, 128*1024, minio.PutObjectOptions{ContentType: "application/octet-stream"})
-	if err == nil {
-		logError(testName, function, args, startTime, "", "PutObject should fail", err)
-		return
-	}
-	if !strings.Contains(err.Error(), "proactively closed to be verified later") {
-		logError(testName, function, args, startTime, "", "Expected error string not found in PutObject error", err)
-		return
-	}
-	err = c.RemoveIncompleteUpload(bucketName, objectName)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "RemoveIncompleteUpload failed", err)
-		return
-	}
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests FPutObject of a big file to trigger multipart
-func testFPutObjectMultipart() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "FPutObject(bucketName, objectName, fileName, opts)"
-	args := map[string]interface{}{
-		"bucketName": "",
-		"objectName": "",
-		"fileName":   "",
-		"opts":       "",
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
-	var fileName = getMintDataDirFilePath("datafile-65-MB")
-	if fileName == "" {
-		// Make a temp file with minPartSize bytes of data.
-		file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
-		if err != nil {
-			logError(testName, function, args, startTime, "", "TempFile creation failed", err)
-			return
-		}
-		// Copy the 65-MB payload (enough for multiple parts) into the temp file.
-		if _, err = io.Copy(file, getDataReader("datafile-65-MB")); err != nil {
-			logError(testName, function, args, startTime, "", "Copy failed", err)
-			return
-		}
-		if err = file.Close(); err != nil {
-			logError(testName, function, args, startTime, "", "File Close failed", err)
-			return
-		}
-		fileName = file.Name()
-		args["fileName"] = fileName
-	}
-	totalSize := dataFileMap["datafile-65-MB"]
-	// Set base object name
-	objectName := bucketName + "FPutObject" + "-standard"
-	args["objectName"] = objectName
-
-	objectContentType := "testapplication/octet-stream"
-	args["objectContentType"] = objectContentType
-
-	// Perform standard FPutObject with the custom contentType provided.
-	n, err := c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "FPutObject failed", err)
-		return
-	}
-	if n != int64(totalSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("FPutObject returned %d bytes, expected %d", n, totalSize), err)
-		return
-	}
-
-	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-	objInfo, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Unexpected error", err)
-		return
-	}
-	if objInfo.Size != int64(totalSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", totalSize, objInfo.Size), err)
-		return
-	}
-	if objInfo.ContentType != objectContentType {
-		logError(testName, function, args, startTime, "", "ContentType doesn't match", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests FPutObject with no contentType provided (defaults to application/octet-stream)
-func testFPutObject() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "FPutObject(bucketName, objectName, fileName, opts)"
-
-	args := map[string]interface{}{
-		"bucketName": "",
-		"objectName": "",
-		"fileName":   "",
-		"opts":       "",
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	location := "us-east-1"
-
-	// Make a new bucket.
-	args["bucketName"] = bucketName
-	args["location"] = location
-	function = "MakeBucket()bucketName, location"
-	err = c.MakeBucket(bucketName, location)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Upload 3 parts' worth of data to use all 3 multipart 'workers' and have an extra part.
-	// Use different data in each part for multipart tests to check parts are uploaded in the correct order.
-	var fName = getMintDataDirFilePath("datafile-65-MB")
-	if fName == "" {
-		// Make a temp file with minPartSize bytes of data.
-		file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
-		if err != nil {
-			logError(testName, function, args, startTime, "", "TempFile creation failed", err)
-			return
-		}
-
-		// Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload.
-		if _, err = io.Copy(file, getDataReader("datafile-65-MB")); err != nil {
-			logError(testName, function, args, startTime, "", "File copy failed", err)
-			return
-		}
-		// Close the file pro-actively for windows.
-		if err = file.Close(); err != nil {
-			logError(testName, function, args, startTime, "", "File close failed", err)
-			return
-		}
-		defer os.Remove(file.Name())
-		fName = file.Name()
-	}
-	totalSize := dataFileMap["datafile-65-MB"]
-
-	// Set base object name
-	function = "FPutObject(bucketName, objectName, fileName, opts)"
-	objectName := bucketName + "FPutObject"
-	args["objectName"] = objectName + "-standard"
-	args["fileName"] = fName
-	args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"}
-
-	// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
-	n, err := c.FPutObject(bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "FPutObject failed", err)
-		return
-	}
-	if n != int64(totalSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", totalSize, n), err)
-		return
-	}
-
-	// Perform FPutObject with no contentType provided (Expecting application/octet-stream)
-	args["objectName"] = objectName + "-Octet"
-	n, err = c.FPutObject(bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "FPutObject failed", err)
-		return
-	}
-	if n != int64(totalSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", totalSize, n), err)
-		return
-	}
-	srcFile, err := os.Open(fName)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "File open failed", err)
-		return
-	}
-	defer srcFile.Close()
-	// Add extension to temp file name
-	tmpFile, err := os.Create(fName + ".gtar")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "File create failed", err)
-		return
-	}
-	defer tmpFile.Close()
-	_, err = io.Copy(tmpFile, srcFile)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "File copy failed", err)
-		return
-	}
-
-	// Perform FPutObject with no contentType provided (Expecting application/x-gtar)
-	args["objectName"] = objectName + "-GTar"
-	n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "FPutObject failed", err)
-		return
-	}
-	if n != int64(totalSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", totalSize, n), err)
-		return
-	}
-
-	// Check headers
-	function = "StatObject(bucketName, objectName, opts)"
-	args["objectName"] = objectName + "-standard"
-	rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "StatObject failed", err)
-		return
-	}
-	if rStandard.ContentType != "application/octet-stream" {
-		logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err)
-		return
-	}
-
-	function = "StatObject(bucketName, objectName, opts)"
-	args["objectName"] = objectName + "-Octet"
-	rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "StatObject failed", err)
-		return
-	}
-	if rOctet.ContentType != "application/octet-stream" {
-		logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err)
-		return
-	}
-
-	function = "StatObject(bucketName, objectName, opts)"
-	args["objectName"] = objectName + "-GTar"
-	rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "StatObject failed", err)
-		return
-	}
-	if rGTar.ContentType != "application/x-gtar" {
-		logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar, got "+rGTar.ContentType, err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	if err = os.Remove(fName + ".gtar"); err != nil {
-		logError(testName, function, args, startTime, "", "File remove failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests that FPutObjectWithContext fails once its request context times out.
-func testFPutObjectWithContext() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "FPutObject(bucketName, objectName, fileName, opts)"
-	args := map[string]interface{}{
-		"bucketName": "",
-		"objectName": "",
-		"fileName":   "",
-		"opts":       "",
-	}
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Upload 1 part's worth of data to use multipart upload.
-	// Use different data in each part for multipart tests to check parts are uploaded in the correct order.
-	var fName = getMintDataDirFilePath("datafile-1-MB")
-	if fName == "" {
-		// Make a temp file with 1 MiB of data.
-		file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest")
-		if err != nil {
-			logError(testName, function, args, startTime, "", "TempFile creation failed", err)
-			return
-		}
-
-		// Copy 1 MB of data into the temp file for the upload below.
-		if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
-			logError(testName, function, args, startTime, "", "File copy failed", err)
-			return
-		}
-		// Close the file pro-actively for windows.
-		if err = file.Close(); err != nil {
-			logError(testName, function, args, startTime, "", "File close failed", err)
-			return
-		}
-		defer os.Remove(file.Name())
-		fName = file.Name()
-	}
-	totalSize := dataFileMap["datafile-1-MB"]
-
-	// Set base object name
-	objectName := bucketName + "FPutObjectWithContext"
-	args["objectName"] = objectName
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
-	args["ctx"] = ctx
-	defer cancel()
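-	// Clarifying note (not in the original): a 1 ns deadline has effectively
-	// already expired by the time the request is issued, so the call below
-	// must fail with a context error.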
-
-	// Perform FPutObjectWithContext with the 1 ns timeout; expect it to fail.
-	_, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
-	if err == nil {
-		logError(testName, function, args, startTime, "", "FPutObjectWithContext should fail on short timeout", err)
-		return
-	}
-	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
-	defer cancel()
-	// Perform FPutObjectWithContext with a long timeout. Expect the put object to succeed
-	n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "FPutObjectWithContext shouldn't fail on long timeout", err)
-		return
-	}
-	if n != int64(totalSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", totalSize, n), err)
-		return
-	}
-
-	_, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "StatObject failed", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Tests that FPutObjectWithContext fails once its request context times out.
-func testFPutObjectWithContextV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "FPutObjectWithContext(ctx, bucketName, objectName, fileName, opts)"
-	args := map[string]interface{}{
-		"bucketName": "",
-		"objectName": "",
-		"opts":       "minio.PutObjectOptions{ContentType:objectContentType}",
-	}
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Upload 1 part's worth of data to use multipart upload.
-	// Use different data in each part for multipart tests to check parts are uploaded in the correct order.
-	var fName = getMintDataDirFilePath("datafile-1-MB")
-	if fName == "" {
-		// Make a temp file with 1 MiB of data.
-		file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest")
-		if err != nil {
-			logError(testName, function, args, startTime, "", "Temp file creation failed", err)
-			return
-		}
-
-		// Copy 1 MB of data into the temp file for the upload below.
-		if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
-			logError(testName, function, args, startTime, "", "File copy failed", err)
-			return
-		}
-
-		// Close the file pro-actively for windows.
-		if err = file.Close(); err != nil {
-			logError(testName, function, args, startTime, "", "File close failed", err)
-			return
-		}
-		defer os.Remove(file.Name())
-		fName = file.Name()
-	}
-	totalSize := dataFileMap["datafile-1-MB"]
-
-	// Set base object name
-	objectName := bucketName + "FPutObjectWithContext"
-	args["objectName"] = objectName
-
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
-	args["ctx"] = ctx
-	defer cancel()
-
-	// Perform FPutObjectWithContext with the 1 ns timeout; expect it to fail.
-	_, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
-	if err == nil {
-		logError(testName, function, args, startTime, "", "FPutObjectWithContext should fail on short timeout", err)
-		return
-	}
-	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
-	defer cancel()
-	// Perform FPutObjectWithContext with a long timeout. Expect the put object to succeed
-	n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "FPutObjectWithContext shouldn't fail on longer timeout", err)
-		return
-	}
-	if n != int64(totalSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, wanted %d got %d", totalSize, n), err)
-		return
-	}
-
-	_, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "StatObject failed", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Test validates that PutObjectWithContext honors request cancellation.
-func testPutObjectWithContext() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "PutObjectWithContext(ctx, bucketName, objectName, fileName, opts)"
-	args := map[string]interface{}{
-		"ctx":        "",
-		"bucketName": "",
-		"objectName": "",
-		"opts":       "",
-	}
-	// Instantiate new minio client object.
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Make a new bucket.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
-		return
-	}
-	bufSize := dataFileMap["datafile-33-kB"]
-	var reader = getDataReader("datafile-33-kB")
-	defer reader.Close()
-	objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
-	args["objectName"] = objectName
-
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
-	args["ctx"] = ctx
-	args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"}
-	defer cancel()
-
-	_, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err == nil {
-		logError(testName, function, args, startTime, "", "PutObjectWithContext should fail on short timeout", err)
-		return
-	}
-
-	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
-	args["ctx"] = ctx
-
-	defer cancel()
-	reader = getDataReader("datafile-33-kB")
-	defer reader.Close()
-	_, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObjectWithContext with long timeout failed", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Tests get object io.ReadSeeker interface methods.
-func testGetObjectReadSeekFunctional() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "GetObject(bucketName, objectName)"
-	args := map[string]interface{}{}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Generate 33K of data.
-	bufSize := dataFileMap["datafile-33-kB"]
-	var reader = getDataReader("datafile-33-kB")
-	defer reader.Close()
-
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	buf, err := ioutil.ReadAll(reader)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll failed", err)
-		return
-	}
-
-	// Save the data
-	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	if n != int64(bufSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, n), err)
-		return
-	}
-
-	defer func() {
-		// Delete all objects and buckets
-		if err = cleanupBucket(bucketName, c); err != nil {
-			logError(testName, function, args, startTime, "", "Cleanup failed", err)
-			return
-		}
-	}()
-
-	// Read the data back
-	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-
-	st, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat object failed", err)
-		return
-	}
-
-	if st.Size != int64(bufSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, st.Size), err)
-		return
-	}
-
-	// The following helper compares the data read from the reader after a
-	// seek with the corresponding slice of the original buffer.
-	cmpData := func(r io.Reader, start, end int) {
-		if end-start == 0 {
-			return
-		}
-		buffer := bytes.NewBuffer([]byte{})
-		if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
-			if err != io.EOF {
-				logError(testName, function, args, startTime, "", "CopyN failed", err)
-				return
-			}
-		}
-		if !bytes.Equal(buf[start:end], buffer.Bytes()) {
-			logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
-			return
-		}
-	}
-
-	// Generic seek error for errors other than io.EOF
-	seekErr := errors.New("seek error")
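-	// Clarifying note (not in the original): seekErr is a sentinel used only
-	// within the table below; it marks cases where some non-nil error is
-	// expected without pinning down which one.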
-
-	testCases := []struct {
-		offset    int64
-		whence    int
-		pos       int64
-		err       error
-		shouldCmp bool
-		start     int
-		end       int
-	}{
-		// Start from offset 0, fetch data and compare
-		{0, 0, 0, nil, true, 0, 0},
-		// Start from offset 2048, fetch data and compare
-		{2048, 0, 2048, nil, true, 2048, bufSize},
-		// Start from offset larger than possible
-		{int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0},
-		// Move to offset 0 without comparing
-		{0, 0, 0, nil, false, 0, 0},
-		// Move one step forward and compare
-		{1, 1, 1, nil, true, 1, bufSize},
-		// Move larger than possible
-		{int64(bufSize), 1, 0, seekErr, false, 0, 0},
-		// Provide negative offset with CUR_SEEK
-		{int64(-1), 1, 0, seekErr, false, 0, 0},
-		// Test with whence SEEK_END and with positive offset
-		{1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0},
-		// Test with whence SEEK_END and with negative offset
-		{-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
-		// Test with whence SEEK_END and with large negative offset
-		{-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0},
-	}
-
-	for i, testCase := range testCases {
-		// Perform seek operation
-		n, err := r.Seek(testCase.offset, testCase.whence)
-		// We expect some non-nil error
-		if testCase.err == seekErr && err == nil {
-			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, expected a seek error, got nil", i+1), err)
-			return
-		}
-		// We expect a specific error
-		if testCase.err != seekErr && testCase.err != err {
-			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err), err)
-			return
-		}
-		// If we expect an error go to the next loop
-		if testCase.err != nil {
-			continue
-		}
-		// Check the returned seek pos
-		if n != testCase.pos {
-			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, seek position does not match, expected %d, got %d", i+1, testCase.pos, n), err)
-			return
-		}
-		// Compare only if shouldCmp is activated
-		if testCase.shouldCmp {
-			cmpData(r, testCase.start, testCase.end)
-		}
-	}
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests get object io.ReaderAt interface methods.
-func testGetObjectReadAtFunctional() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "GetObject(bucketName, objectName)"
-	args := map[string]interface{}{}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Generate 33K of data.
-	bufSize := dataFileMap["datafile-33-kB"]
-	var reader = getDataReader("datafile-33-kB")
-	defer reader.Close()
-
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	buf, err := ioutil.ReadAll(reader)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll failed", err)
-		return
-	}
-
-	// Save the data
-	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	if n != int64(bufSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", bufSize, n), err)
-		return
-	}
-
-	// read the data back
-	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-	offset := int64(2048)
-
-	// read directly
-	buf1 := make([]byte, 512)
-	buf2 := make([]byte, 512)
-	buf3 := make([]byte, 512)
-	buf4 := make([]byte, 512)
-
-	// Test readAt before stat is called.
-	m, err := r.ReadAt(buf1, offset)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAt failed", err)
-		return
-	}
-	if m != len(buf1) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected, expected %d, got %d", len(buf1), m), err)
-		return
-	}
-	if !bytes.Equal(buf1, buf[offset:offset+512]) {
-		logError(testName, function, args, startTime, "", "ReadAt data does not match the uploaded data at this offset", err)
-		return
-	}
-	offset += 512
-
-	st, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-
-	if st.Size != int64(bufSize) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d, got %d", bufSize, st.Size), err)
-		return
-	}
-
-	m, err = r.ReadAt(buf2, offset)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAt failed", err)
-		return
-	}
-	if m != len(buf2) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected, expected %d, got %d", len(buf2), m), err)
-		return
-	}
-	if !bytes.Equal(buf2, buf[offset:offset+512]) {
-		logError(testName, function, args, startTime, "", "ReadAt data does not match the uploaded data at this offset", err)
-		return
-	}
-	offset += 512
-	m, err = r.ReadAt(buf3, offset)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAt failed", err)
-		return
-	}
-	if m != len(buf3) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected, expected %d, got %d", len(buf3), m), err)
-		return
-	}
-	if !bytes.Equal(buf3, buf[offset:offset+512]) {
-		logError(testName, function, args, startTime, "", "ReadAt data does not match the uploaded data at this offset", err)
-		return
-	}
-	offset += 512
-	m, err = r.ReadAt(buf4, offset)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAt failed", err)
-		return
-	}
-	if m != len(buf4) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected, expected %d, got %d", len(buf4), m), err)
-		return
-	}
-	if !bytes.Equal(buf4, buf[offset:offset+512]) {
-		logError(testName, function, args, startTime, "", "ReadAt data does not match the uploaded data at this offset", err)
-		return
-	}
-
-	buf5 := make([]byte, n)
-	// Read the whole object.
-	m, err = r.ReadAt(buf5, 0)
-	if err != nil && err != io.EOF {
-		logError(testName, function, args, startTime, "", "ReadAt failed", err)
-		return
-	}
-	if m != len(buf5) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read fewer bytes than expected, expected %d, got %d", len(buf5), m), err)
-		return
-	}
-	if !bytes.Equal(buf, buf5) {
-		logError(testName, function, args, startTime, "", "Data read via ReadAt does not match what was uploaded", err)
-		return
-	}
-
-	buf6 := make([]byte, n+1)
-	// Read the whole object and beyond.
-	_, err = r.ReadAt(buf6, 0)
-	if err != io.EOF {
-		logError(testName, function, args, startTime, "", "ReadAt beyond the object size should return io.EOF", err)
-		return
-	}
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-	successLogger(testName, function, args, startTime).Info()
-}
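-
-// A minimal sketch (not from the original tests) of how the ReadAt behaviour
-// verified above composes with the standard library: io.SectionReader issues
-// ReadAt calls under the hood, giving an independent view over a byte range.
-// Bucket and object names are placeholders.
-//
-//	func readRange(c *minio.Client, off, n int64) ([]byte, error) {
-//		obj, err := c.GetObject("mybucket", "myobject", minio.GetObjectOptions{})
-//		if err != nil {
-//			return nil, err
-//		}
-//		defer obj.Close()
-//		return ioutil.ReadAll(io.NewSectionReader(obj, off, n))
-//	}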
-
-// Test Presigned Post Policy
-func testPresignedPostPolicy() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "PresignedPostPolicy(policy)"
-	args := map[string]interface{}{
-		"policy": "",
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
-	// Make a new bucket in 'us-east-1' (source bucket).
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Generate 33K of data.
-	bufSize := dataFileMap["datafile-33-kB"]
-	var reader = getDataReader("datafile-33-kB")
-	defer reader.Close()
-
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-
-	buf, err := ioutil.ReadAll(reader)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll failed", err)
-		return
-	}
-
-	// Save the data
-	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	if n != int64(bufSize) {
-		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err)
-		return
-	}
-
-	policy := minio.NewPostPolicy()
-
-	if err := policy.SetBucket(""); err == nil {
-		logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err)
-		return
-	}
-	if err := policy.SetKey(""); err == nil {
-		logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err)
-		return
-	}
-	if err := policy.SetKeyStartsWith(""); err == nil {
-		logError(testName, function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err)
-		return
-	}
-	if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
-		logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err)
-		return
-	}
-	if err := policy.SetContentType(""); err == nil {
-		logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err)
-		return
-	}
-	if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
-		logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err)
-		return
-	}
-	if err := policy.SetUserMetadata("", ""); err == nil {
-		logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err)
-		return
-	}
-
-	policy.SetBucket(bucketName)
-	policy.SetKey(objectName)
-	policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
-	policy.SetContentType("binary/octet-stream")
-	policy.SetContentLengthRange(10, 1024*1024)
-	policy.SetUserMetadata(metadataKey, metadataValue)
-	args["policy"] = policy.String()
-
-	presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(policy)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err)
-		return
-	}
-
-	var formBuf bytes.Buffer
-	writer := multipart.NewWriter(&formBuf)
-	for k, v := range formData {
-		writer.WriteField(k, v)
-	}
-
-	// Get a 33KB file to upload and test if set post policy works
-	var filePath = getMintDataDirFilePath("datafile-33-kB")
-	if filePath == "" {
-		// Make a temp file with 33 KB data.
-		file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest")
-		if err != nil {
-			logError(testName, function, args, startTime, "", "TempFile creation failed", err)
-			return
-		}
-		if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil {
-			logError(testName, function, args, startTime, "", "Copy failed", err)
-			return
-		}
-		if err = file.Close(); err != nil {
-			logError(testName, function, args, startTime, "", "File Close failed", err)
-			return
-		}
-		filePath = file.Name()
-	}
-
-	// add file to post request
-	f, err := os.Open(filePath)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "File open failed", err)
-		return
-	}
-	defer f.Close()
-	w, err := writer.CreateFormFile("file", filePath)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "CreateFormFile failed", err)
-		return
-	}
-
-	_, err = io.Copy(w, f)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Copy failed", err)
-		return
-	}
-	writer.Close()
-
-	// make post request with correct form data
-	res, err := http.Post(presignedPostPolicyURL.String(), writer.FormDataContentType(), bytes.NewReader(formBuf.Bytes()))
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Http request failed", err)
-		return
-	}
-	defer res.Body.Close()
-	if res.StatusCode != http.StatusNoContent {
-		logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status))
-		return
-	}
-
-	// expected path should be absolute path of the object
-	var scheme string
-	if mustParseBool(os.Getenv(enableHTTPS)) {
-		scheme = "https://"
-	} else {
-		scheme = "http://"
-	}
-
-	expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName
-
-	if val, ok := res.Header["Location"]; ok {
-		if val[0] != expectedLocation {
-			logError(testName, function, args, startTime, "", "Location in header response is incorrect", err)
-			return
-		}
-	} else {
-		logError(testName, function, args, startTime, "", "Location not found in header response", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
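-
-// For reference, a minimal sketch (placeholders throughout, not part of the
-// original suite) of the happy-path half of the flow tested above; the URL
-// and form fields returned by PresignedPostPolicy can be handed to any HTTP
-// client, an HTML form, or curl via repeated -F flags plus -F file=@<path>.
-//
-//	func presignUpload(c *minio.Client) (*url.URL, map[string]string, error) {
-//		policy := minio.NewPostPolicy()
-//		if err := policy.SetBucket("mybucket"); err != nil {
-//			return nil, nil, err
-//		}
-//		if err := policy.SetKey("uploads/photo.jpg"); err != nil {
-//			return nil, nil, err
-//		}
-//		if err := policy.SetExpires(time.Now().UTC().Add(24 * time.Hour)); err != nil {
-//			return nil, nil, err
-//		}
-//		return c.PresignedPostPolicy(policy)
-//	}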
-
-// Tests copy object
-func testCopyObject() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "CopyObject(dst, src)"
-	args := map[string]interface{}{}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
-	// Make a new bucket in 'us-east-1' (source bucket).
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Make a new bucket in 'us-east-1' (destination bucket).
-	err = c.MakeBucket(bucketName+"-copy", "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Generate 33K of data.
-	bufSize := dataFileMap["datafile-33-kB"]
-	var reader = getDataReader("datafile-33-kB")
-
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	if n != int64(bufSize) {
-		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err)
-		return
-	}
-
-	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-	// Check the various fields of source object against destination object.
-	objInfo, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-
-	// Copy Source
-	src := minio.NewSourceInfo(bucketName, objectName, nil)
-	args["src"] = src
-
-	// Set copy conditions.
-
-	// All invalid conditions first.
-	err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
-	if err == nil {
-		logError(testName, function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err)
-		return
-	}
-	err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
-	if err == nil {
-		logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err)
-		return
-	}
-	err = src.SetMatchETagCond("")
-	if err == nil {
-		logError(testName, function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err)
-		return
-	}
-	err = src.SetMatchETagExceptCond("")
-	if err == nil {
-		logError(testName, function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err)
-		return
-	}
-
-	err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
-	if err != nil {
-		logError(testName, function, args, startTime, "", "SetModifiedSinceCond failed", err)
-		return
-	}
-	err = src.SetMatchETagCond(objInfo.ETag)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "SetMatchETagCond failed", err)
-		return
-	}
-
-	dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
-	args["dst"] = dst
-	if err != nil {
-		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
-		return
-	}
-
-	// Perform the Copy
-	err = c.CopyObject(dst, src)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "CopyObject failed", err)
-		return
-	}
-
-	// Source object
-	r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-
-	// Destination object
-	readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-	// Check the various fields of source object against destination object.
-	objInfo, err = r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-	objInfoCopy, err := readerCopy.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-	if objInfo.Size != objInfoCopy.Size {
-		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err)
-		return
-	}
-
-	// CopyObject again but with wrong conditions
-	src = minio.NewSourceInfo(bucketName, objectName, nil)
-	err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
-	if err != nil {
-		logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond failed", err)
-		return
-	}
-	err = src.SetMatchETagExceptCond(objInfo.ETag)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "SetMatchETagExceptCond failed", err)
-		return
-	}
-
-	// Perform the Copy which should fail
-	err = c.CopyObject(dst, src)
-	if err == nil {
-		logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-	if err = cleanupBucket(bucketName+"-copy", c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-	successLogger(testName, function, args, startTime).Info()
-}
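-
-// A minimal sketch (not from the original tests) of the unconditional variant
-// of the server-side copy exercised above; all names are placeholders.
-//
-//	func copyUnconditional(c *minio.Client) error {
-//		src := minio.NewSourceInfo("srcbucket", "srcobject", nil)
-//		dst, err := minio.NewDestinationInfo("dstbucket", "dstobject", nil, nil)
-//		if err != nil {
-//			return err
-//		}
-//		return c.CopyObject(dst, src)
-//	}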
-
-// TestEncryptionPutGet tests client side encryption
-func testEncryptionPutGet() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "PutEncryptedObject(bucketName, objectName, reader, cbcMaterials, metadata, progress)"
-	args := map[string]interface{}{
-		"bucketName":   "",
-		"objectName":   "",
-		"cbcMaterials": "",
-		"metadata":     "",
-	}
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Generate a symmetric key
-	symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
-
-	// Generate an asymmetric key from predefined public and private keys
-	privateKey, err := hex.DecodeString(
-		"30820277020100300d06092a864886f70d0101010500048202613082025d" +
-			"0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" +
-			"bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" +
-			"5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" +
-			"cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" +
-			"15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" +
-			"c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" +
-			"57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" +
-			"5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" +
-			"bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" +
-			"41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" +
-			"0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" +
-			"d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" +
-			"f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" +
-			"27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" +
-			"6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" +
-			"d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" +
-			"bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" +
-			"bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" +
-			"0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" +
-			"47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" +
-			"9945cb5c7d")
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err)
-		return
-	}
-
-	publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" +
-		"b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" +
-		"97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" +
-		"5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" +
-		"c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" +
-		"80a89e43f29b570203010001")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err)
-		return
-	}
-
-	// Generate an asymmetric key
-	asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err)
-		return
-	}
-
-	testCases := []struct {
-		buf    []byte
-		encKey encrypt.Key
-	}{
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
-
-		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)},
-		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1)},
-		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)},
-		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)},
-		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)},
-		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
-	}
-
-	for i, testCase := range testCases {
-		// Generate a random object name
-		objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-		args["objectName"] = objectName
-
-		// Secured object
-		cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey)
-		args["cbcMaterials"] = cbcMaterials
-
-		if err != nil {
-			logError(testName, function, args, startTime, "", "NewCBCSecureMaterials failed", err)
-			return
-		}
-
-		// Put encrypted data
-		_, err = c.PutEncryptedObject(bucketName, objectName, bytes.NewReader(testCase.buf), cbcMaterials)
-		if err != nil {
-			logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err)
-			return
-		}
-
-		// Read the data back
-		r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials)
-		if err != nil {
-			logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
-			return
-		}
-		defer r.Close()
-
-		// Compare the sent object with the received one
-		recvBuffer := bytes.NewBuffer([]byte{})
-		if _, err = io.Copy(recvBuffer, r); err != nil {
-			logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err)
-			return
-		}
-		if recvBuffer.Len() != len(testCase.buf) {
-			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err)
-			return
-		}
-		if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
-			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, decrypted data does not match what was uploaded", i+1), err)
-			return
-		}
-
-		successLogger(testName, function, args, startTime).Info()
-
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
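-
-// A minimal sketch (not part of the original suite) of the client-side
-// encryption round trip tested above, using only the symmetric-key path;
-// bucket/object names and the 16-byte AES key are placeholders.
-//
-//	func encryptedRoundTrip(c *minio.Client, data []byte) ([]byte, error) {
-//		key := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
-//		materials, err := encrypt.NewCBCSecureMaterials(key)
-//		if err != nil {
-//			return nil, err
-//		}
-//		if _, err := c.PutEncryptedObject("mybucket", "myobject", bytes.NewReader(data), materials); err != nil {
-//			return nil, err
-//		}
-//		r, err := c.GetEncryptedObject("mybucket", "myobject", materials)
-//		if err != nil {
-//			return nil, err
-//		}
-//		defer r.Close()
-//		return ioutil.ReadAll(r)
-//	}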
-
-// TestEncryptionFPut tests client side encryption
-func testEncryptionFPut() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, cbcMaterials)"
-	args := map[string]interface{}{
-		"bucketName":   "",
-		"objectName":   "",
-		"filePath":     "",
-		"contentType":  "",
-		"cbcMaterials": "",
-	}
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Generate a symmetric key
-	symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
-
-	// Generate an asymmetric key from predefined public and private keys
-	privateKey, err := hex.DecodeString(
-		"30820277020100300d06092a864886f70d0101010500048202613082025d" +
-			"0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" +
-			"bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" +
-			"5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" +
-			"cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" +
-			"15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" +
-			"c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" +
-			"57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" +
-			"5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" +
-			"bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" +
-			"41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" +
-			"0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" +
-			"d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" +
-			"f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" +
-			"27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" +
-			"6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" +
-			"d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" +
-			"bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" +
-			"bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" +
-			"0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" +
-			"47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" +
-			"9945cb5c7d")
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err)
-		return
-	}
-
-	publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" +
-		"b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" +
-		"97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" +
-		"5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" +
-		"c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" +
-		"80a89e43f29b570203010001")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err)
-		return
-	}
-
-	// Generate an asymmetric key
-	asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err)
-		return
-	}
-
-	// Object custom metadata
-	customContentType := "custom/contenttype"
-	args["metadata"] = customContentType
-
-	testCases := []struct {
-		buf    []byte
-		encKey encrypt.Key
-	}{
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)},
-		{encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
-
-		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)},
-		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1)},
-		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)},
-		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)},
-		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)},
-		{encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
-	}
-
-	for i, testCase := range testCases {
-		// Generate a random object name
-		objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-		args["objectName"] = objectName
-
-		// Secured object
-		cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey)
-		args["cbcMaterials"] = cbcMaterials
-
-		if err != nil {
-			logError(testName, function, args, startTime, "", "NewCBCSecureMaterials failed", err)
-			return
-		}
-		// Generate a random file name.
-		fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-		file, err := os.Create(fileName)
-		if err != nil {
-			logError(testName, function, args, startTime, "", "file create failed", err)
-			return
-		}
-		_, err = file.Write(testCase.buf)
-		if err != nil {
-			logError(testName, function, args, startTime, "", "file write failed", err)
-			return
-		}
-		file.Close()
-		// Put encrypted data
-		if _, err = c.FPutEncryptedObject(bucketName, objectName, fileName, cbcMaterials); err != nil {
-			logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err)
-			return
-		}
-
-		// Read the data back
-		r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials)
-		if err != nil {
-			logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
-			return
-		}
-		defer r.Close()
-
-		// Compare the sent object with the received one
-		recvBuffer := bytes.NewBuffer([]byte{})
-		if _, err = io.Copy(recvBuffer, r); err != nil {
-			logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err)
-			return
-		}
-		if recvBuffer.Len() != len(testCase.buf) {
-			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err)
-			return
-		}
-		if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
-			logError(testName, function, args, startTime, "", fmt.Sprintf("Test %d, decrypted data does not match what was uploaded", i+1), err)
-			return
-		}
-
-		if err = os.Remove(fileName); err != nil {
-			logError(testName, function, args, startTime, "", "File remove failed", err)
-			return
-		}
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-func testBucketNotification() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "SetBucketNotification(bucketName)"
-	args := map[string]interface{}{
-		"bucketName": "",
-	}
-
-	if os.Getenv("NOTIFY_BUCKET") == "" ||
-		os.Getenv("NOTIFY_SERVICE") == "" ||
-		os.Getenv("NOTIFY_REGION") == "" ||
-		os.Getenv("NOTIFY_ACCOUNTID") == "" ||
-		os.Getenv("NOTIFY_RESOURCE") == "" {
-		ignoredLog(testName, function, args, startTime, "Skipped notification test as it is not configured").Info()
-		return
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	bucketName := os.Getenv("NOTIFY_BUCKET")
-	args["bucketName"] = bucketName
-
-	topicArn := minio.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE"))
-	queueArn := minio.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource")
-
-	topicConfig := minio.NewNotificationConfig(topicArn)
-
-	topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
-	topicConfig.AddFilterSuffix("jpg")
-
-	queueConfig := minio.NewNotificationConfig(queueArn)
-	queueConfig.AddEvents(minio.ObjectCreatedAll)
-	queueConfig.AddFilterPrefix("photos/")
-
-	bNotification := minio.BucketNotification{}
-	bNotification.AddTopic(topicConfig)
-
-	// Add the same topicConfig again, should have no effect
-	// because it is duplicated
-	bNotification.AddTopic(topicConfig)
-	if len(bNotification.TopicConfigs) != 1 {
-		logError(testName, function, args, startTime, "", "Duplicate entry added", err)
-		return
-	}
-
-	// Add and remove a queue config
-	bNotification.AddQueue(queueConfig)
-	bNotification.RemoveQueueByArn(queueArn)
-
-	err = c.SetBucketNotification(bucketName, bNotification)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "SetBucketNotification failed", err)
-		return
-	}
-
-	bNotification, err = c.GetBucketNotification(bucketName)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetBucketNotification failed", err)
-		return
-	}
-
-	if len(bNotification.TopicConfigs) != 1 {
-		logError(testName, function, args, startTime, "", "Topic config is empty", err)
-		return
-	}
-
-	if bNotification.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" {
-		logError(testName, function, args, startTime, "", "Couldn't get the suffix", err)
-		return
-	}
-
-	err = c.RemoveAllBucketNotification(bucketName)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
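-
-// The test above configures notifications; consuming them is a separate API.
-// A minimal sketch, assuming the ListenBucketNotification call of this client
-// version (bucket name, prefix, suffix and event list are placeholders):
-//
-//	doneCh := make(chan struct{})
-//	defer close(doneCh)
-//	for info := range c.ListenBucketNotification("mybucket", "photos/", ".jpg",
-//		[]string{"s3:ObjectCreated:*"}, doneCh) {
-//		if info.Err != nil {
-//			fmt.Println("listen error:", info.Err)
-//			break
-//		}
-//		for _, record := range info.Records {
-//			fmt.Println("event on:", record.S3.Object.Key)
-//		}
-//	}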
-
-// Tests comprehensive list of all methods.
-func testFunctional() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "testFunctional()"
-	function_all := ""
-	args := map[string]interface{}{}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	c, err := minio.New(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, nil, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
-	// Make a new bucket.
-	function = "MakeBucket(bucketName, region)"
-	function_all = "MakeBucket(bucketName, region)"
-	args["bucketName"] = bucketName
-	err = c.MakeBucket(bucketName, "us-east-1")
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Generate a random file name.
-	fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	file, err := os.Create(fileName)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "File creation failed", err)
-		return
-	}
-	for i := 0; i < 3; i++ {
-		buf := make([]byte, rand.Intn(1<<19))
-		_, err = file.Write(buf)
-		if err != nil {
-			logError(testName, function, args, startTime, "", "File write failed", err)
-			return
-		}
-	}
-	file.Close()
-
-	// Verify the bucket exists and you have access.
-	var exists bool
-	function = "BucketExists(bucketName)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-	}
-	exists, err = c.BucketExists(bucketName)
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "BucketExists failed", err)
-		return
-	}
-	if !exists {
-		logError(testName, function, args, startTime, "", "Could not find the bucket", err)
-		return
-	}
-
-	// Asserting the default bucket policy.
-	function = "GetBucketPolicy(bucketName, objectPrefix)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName":   bucketName,
-		"objectPrefix": "",
-	}
-	policyAccess, err := c.GetBucketPolicy(bucketName, "")
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
-		return
-	}
-	if policyAccess != "none" {
-		logError(testName, function, args, startTime, "", "policy should be set to none", err)
-		return
-	}
-
-	// Set the bucket policy to 'public readonly'.
-	function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName":   bucketName,
-		"objectPrefix": "",
-		"bucketPolicy": policy.BucketPolicyReadOnly,
-	}
-	err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadOnly)
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
-		return
-	}
-	// should return policy `readonly`.
-	function = "GetBucketPolicy(bucketName, objectPrefix)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName":   bucketName,
-		"objectPrefix": "",
-	}
-	policyAccess, err = c.GetBucketPolicy(bucketName, "")
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
-		return
-	}
-	if policyAccess != "readonly" {
-		logError(testName, function, args, startTime, "", "policy should be set to readonly", err)
-		return
-	}
-
-	// Make the bucket 'public writeonly'.
-	function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName":   bucketName,
-		"objectPrefix": "",
-		"bucketPolicy": policy.BucketPolicyWriteOnly,
-	}
-	err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyWriteOnly)
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
-		return
-	}
-	// should return policy `writeonly`.
-	function = "GetBucketPolicy(bucketName, objectPrefix)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName":   bucketName,
-		"objectPrefix": "",
-	}
-	policyAccess, err = c.GetBucketPolicy(bucketName, "")
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
-		return
-	}
-	if policyAccess != "writeonly" {
-		logError(testName, function, args, startTime, "", "policy should be set to writeonly", err)
-		return
-	}
-	// Make the bucket 'public read/write'.
-	function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName":   bucketName,
-		"objectPrefix": "",
-		"bucketPolicy": policy.BucketPolicyReadWrite,
-	}
-	err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
-		return
-	}
-	// should return policy `readwrite`.
-	function = "GetBucketPolicy(bucketName, objectPrefix)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName":   bucketName,
-		"objectPrefix": "",
-	}
-	policyAccess, err = c.GetBucketPolicy(bucketName, "")
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
-		return
-	}
-	if policyAccess != "readwrite" {
-		logError(testName, function, args, startTime, "", "policy should be set to readwrite", err)
-		return
-	}
-	// List all buckets.
-	function = "ListBuckets()"
-	function_all += ", " + function
-	args = nil
-	buckets, err := c.ListBuckets()
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ListBuckets failed", err)
-		return
-	}
-	if len(buckets) == 0 {
-		logError(testName, function, args, startTime, "", "Found bucket list to be empty", err)
-		return
-	}
-
-	// Verify if previously created bucket is listed in list buckets.
-	bucketFound := false
-	for _, bucket := range buckets {
-		if bucket.Name == bucketName {
-			bucketFound = true
-		}
-	}
-
-	// If bucket not found error out.
-	if !bucketFound {
-		logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err)
-		return
-	}
-
-	objectName := bucketName + "unique"
-
-	// Generate data
-	buf := bytes.Repeat([]byte("f"), 1<<19)
-
-	function = "PutObject(bucketName, objectName, reader, contentType)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName":  bucketName,
-		"objectName":  objectName,
-		"contentType": "",
-	}
-
-	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	if n != int64(len(buf)) {
-		logError(testName, function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err)
-		return
-	}
-
-	args = map[string]interface{}{
-		"bucketName":  bucketName,
-		"objectName":  objectName + "-nolength",
-		"contentType": "binary/octet-stream",
-	}
-
-	n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	if n != int64(len(buf)) {
-		logError(testName, function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err)
-		return
-	}
-
-	// Instantiate a done channel to close all listing.
-	doneCh := make(chan struct{})
-	defer close(doneCh)
-
-	objFound := false
-	isRecursive := true // Recursive is true.
-
-	function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName":  bucketName,
-		"objectName":  objectName,
-		"isRecursive": isRecursive,
-	}
-
-	for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
-		if obj.Key == objectName {
-			objFound = true
-			break
-		}
-	}
-	if !objFound {
-		logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err)
-		return
-	}
-
-	objFound = false
-	isRecursive = true // Recursive is true.
-	function = "ListObjectsV2(bucketName, objectName, isRecursive, doneCh)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName":  bucketName,
-		"objectName":  objectName,
-		"isRecursive": isRecursive,
-	}
-
-	for obj := range c.ListObjectsV2(bucketName, objectName, isRecursive, doneCh) {
-		if obj.Key == objectName {
-			objFound = true
-			break
-		}
-	}
-	if !objFound {
-		logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err)
-		return
-	}
-
-	incompObjNotFound := true
-
-	function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName":  bucketName,
-		"objectName":  objectName,
-		"isRecursive": isRecursive,
-	}
-
-	for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
-		if objIncompl.Key != "" {
-			incompObjNotFound = false
-			break
-		}
-	}
-	if !incompObjNotFound {
-		logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err)
-		return
-	}
-
-	function = "GetObject(bucketName, objectName)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"objectName": objectName,
-	}
-	newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-
-	newReadBytes, err := ioutil.ReadAll(newReader)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll failed", err)
-		return
-	}
-
-	if !bytes.Equal(newReadBytes, buf) {
-		logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err)
-		return
-	}
-
-	function = "FGetObject(bucketName, objectName, fileName)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"objectName": objectName,
-		"fileName":   fileName + "-f",
-	}
-	err = c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "FGetObject failed", err)
-		return
-	}
-
-	function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"objectName": "",
-		"expires":    3600 * time.Second,
-	}
-	if _, err = c.PresignedHeadObject(bucketName, "", 3600*time.Second, nil); err == nil {
-		logError(testName, function, args, startTime, "", "PresignedHeadObject success", err)
-		return
-	}
-
-	// Generate presigned HEAD object url.
-	function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"objectName": objectName,
-		"expires":    3600 * time.Second,
-	}
-	presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil)
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err)
-		return
-	}
-	// Verify if presigned url works.
-	resp, err := http.Head(presignedHeadURL.String())
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err)
-		return
-	}
-	if resp.StatusCode != http.StatusOK {
-		logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err)
-		return
-	}
-	if resp.Header.Get("ETag") == "" {
-		logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err)
-		return
-	}
-	resp.Body.Close()
-
-	function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"objectName": "",
-		"expires":    3600 * time.Second,
-	}
-	_, err = c.PresignedGetObject(bucketName, "", 3600*time.Second, nil)
-	if err == nil {
-		logError(testName, function, args, startTime, "", "PresignedGetObject success", err)
-		return
-	}
-
-	// Generate presigned GET object url.
-	function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"objectName": objectName,
-		"expires":    3600 * time.Second,
-	}
-	presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
-		return
-	}
-
-	// Verify if presigned url works.
-	resp, err = http.Get(presignedGetURL.String())
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
-		return
-	}
-	if resp.StatusCode != http.StatusOK {
-		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
-		return
-	}
-	newPresignedBytes, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
-		return
-	}
-	resp.Body.Close()
-	if !bytes.Equal(newPresignedBytes, buf) {
-		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
-		return
-	}
-
-	// Set request parameters.
-	reqParams := make(url.Values)
-	reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"objectName": objectName,
-		"expires":    3600 * time.Second,
-		"reqParams":  reqParams,
-	}
-	presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
-		return
-	}
-	// Verify if presigned url works.
-	resp, err = http.Get(presignedGetURL.String())
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
-		return
-	}
-	if resp.StatusCode != http.StatusOK {
-		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
-		return
-	}
-	newPresignedBytes, err = ioutil.ReadAll(resp.Body)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
-		return
-	}
-	if !bytes.Equal(newPresignedBytes, buf) {
-		logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err)
-		return
-	}
-	if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
-		logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err)
-		return
-	}
-
-	function = "PresignedPutObject(bucketName, objectName, expires)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"objectName": "",
-		"expires":    3600 * time.Second,
-	}
-	_, err = c.PresignedPutObject(bucketName, "", 3600*time.Second)
-	if err == nil {
-		logError(testName, function, args, startTime, "", "PresignedPutObject success", err)
-		return
-	}
-
-	function = "PresignedPutObject(bucketName, objectName, expires)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"objectName": objectName + "-presigned",
-		"expires":    3600 * time.Second,
-	}
-	presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
-		return
-	}
-
-	buf = bytes.Repeat([]byte("g"), 1<<19)
-
-	req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err)
-		return
-	}
-	httpClient := &http.Client{
-		// Setting a sensible time out of 30secs to wait for response
-		// headers. Request is pro-actively cancelled after 30secs
-		// with no response.
-		Timeout:   30 * time.Second,
-		Transport: http.DefaultTransport,
-	}
-	resp, err = httpClient.Do(req)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
-		return
-	}
-	resp.Body.Close()
-
-	newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err)
-		return
-	}
-
-	newReadBytes, err = ioutil.ReadAll(newReader)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err)
-		return
-	}
-
-	if !bytes.Equal(newReadBytes, buf) {
-		logError(testName, function, args, startTime, "", "Bytes mismatch", err)
-		return
-	}
-
-	function = "RemoveObject(bucketName, objectName)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"objectName": objectName,
-	}
-	err = c.RemoveObject(bucketName, objectName)
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "RemoveObject failed", err)
-		return
-	}
-	args["objectName"] = objectName + "-f"
-	err = c.RemoveObject(bucketName, objectName+"-f")
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "RemoveObject failed", err)
-		return
-	}
-
-	args["objectName"] = objectName + "-nolength"
-	err = c.RemoveObject(bucketName, objectName+"-nolength")
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "RemoveObject failed", err)
-		return
-	}
-
-	args["objectName"] = objectName + "-presigned"
-	err = c.RemoveObject(bucketName, objectName+"-presigned")
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "RemoveObject failed", err)
-		return
-	}
-
-	function = "RemoveBucket(bucketName)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-	}
-	err = c.RemoveBucket(bucketName)
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
-		return
-	}
-	err = c.RemoveBucket(bucketName)
-	if err == nil {
-		logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err)
-		return
-	}
-	if err.Error() != "The specified bucket does not exist" {
-		logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
-		return
-	}
-
-	if err = os.Remove(fileName); err != nil {
-		logError(testName, function, args, startTime, "", "File Remove failed", err)
-		return
-	}
-	if err = os.Remove(fileName + "-f"); err != nil {
-		logError(testName, function, args, startTime, "", "File Remove failed", err)
-		return
-	}
-	successLogger(testName, function_all, args, startTime).Info()
-}
-
-// Test for validating GetObject Reader* methods functioning when the
-// object is modified in the object store.
-func testGetObjectModified() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "GetObject(bucketName, objectName)"
-	args := map[string]interface{}{}
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Make a new bucket.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-	defer c.RemoveBucket(bucketName)
-
-	// Upload an object.
-	objectName := "myobject"
-	args["objectName"] = objectName
-	content := "helloworld"
-	_, err = c.PutObject(bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err)
-		return
-	}
-
-	defer c.RemoveObject(bucketName, objectName)
-
-	reader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err)
-		return
-	}
-	defer reader.Close()
-
-	// Read a few bytes of the object.
-	b := make([]byte, 5)
-	n, err := reader.ReadAt(b, 0)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err)
-		return
-	}
-
-	// Upload different contents to the same object while object is being read.
-	newContent := "goodbyeworld"
-	_, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err)
-		return
-	}
-
-	// Confirm that a Stat() call in between doesn't change the Object's cached etag.
-	_, err = reader.Stat()
-	expectedError := "At least one of the pre-conditions you specified did not hold"
-	if err == nil || err.Error() != expectedError {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Expected Stat to fail with error %q, got %v", expectedError, err), err)
-		return
-	}
-
-	// Read again, only to find the object contents have been modified since the last read.
-	_, err = reader.ReadAt(b, int64(n))
-	if err == nil {
-		logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but it succeeded", err)
-		return
-	}
-	if err.Error() != expectedError {
-		logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err)
-		return
-	}
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test validates that PutObject correctly uploads a file seeked to a given offset.
-func testPutObjectUploadSeekedObject() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "PutObject(bucketName, objectName, fileToUpload, contentType)"
-	args := map[string]interface{}{
-		"bucketName":   "",
-		"objectName":   "",
-		"fileToUpload": "",
-		"contentType":  "binary/octet-stream",
-	}
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Make a new bucket.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-	defer c.RemoveBucket(bucketName)
-
-	var tempfile *os.File
-
-	if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" {
-		tempfile, err = os.Open(fileName)
-		if err != nil {
-			logError(testName, function, args, startTime, "", "File open failed", err)
-			return
-		}
-		args["fileToUpload"] = fileName
-	} else {
-		tempfile, err = ioutil.TempFile("", "minio-go-upload-test-")
-		if err != nil {
-			logError(testName, function, args, startTime, "", "TempFile create failed", err)
-			return
-		}
-		args["fileToUpload"] = tempfile.Name()
-
-		// Generate 100kB data
-		if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil {
-			logError(testName, function, args, startTime, "", "File copy failed", err)
-			return
-		}
-
-		defer os.Remove(tempfile.Name())
-
-		// Seek back to the beginning of the file.
-		if _, err = tempfile.Seek(0, 0); err != nil {
-			logError(testName, function, args, startTime, "", "TempFile seek failed", err)
-			return
-		}
-	}
-	var length = 100 * humanize.KiByte
-	objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
-	args["objectName"] = objectName
-
-	offset := length / 2
-	if _, err = tempfile.Seek(int64(offset), 0); err != nil {
-		logError(testName, function, args, startTime, "", "TempFile seek failed", err)
-		return
-	}
-
-	n, err := c.PutObject(bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-	if n != int64(length-offset) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid length returned, expected %d got %d", int64(length-offset), n), err)
-		return
-	}
-	tempfile.Close()
-
-	obj, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-
-	n, err = obj.Seek(int64(offset), 0)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Seek failed", err)
-		return
-	}
-	if n != int64(offset) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err)
-		return
-	}
-
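-	// Upload again, this time streaming from the seeked GetObject reader; the
-	// new object should likewise contain only the trailing half of the data.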
-	n, err = c.PutObject(bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-	if n != int64(length-offset) {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(length-offset), n), err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests bucket re-create errors.
-func testMakeBucketErrorV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "MakeBucket(bucketName, region)"
-	args := map[string]interface{}{
-		"bucketName": "",
-		"region":     "eu-west-1",
-	}
-
-	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
-		ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
-		return
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	region := "eu-west-1"
-	args["bucketName"] = bucketName
-	args["region"] = region
-
-	// Make a new bucket in 'eu-west-1'.
-	if err = c.MakeBucket(bucketName, region); err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-	if err = c.MakeBucket(bucketName, region); err == nil {
-		logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err)
-		return
-	}
-	// Verify valid error response from server.
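-	// AWS returns BucketAlreadyOwnedByYou when the caller re-creates its own
-	// bucket, and BucketAlreadyExists when the name is taken by another account.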
-	if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
-		minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
-		logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
-	}
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test that the GetObject reader does not return an error when closed twice.
-func testGetObjectClosedTwiceV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "MakeBucket(bucketName, region)"
-	args := map[string]interface{}{
-		"bucketName": "",
-		"region":     "eu-west-1",
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Generate 33K of data.
-	bufSize := dataFileMap["datafile-33-kB"]
-	var reader = getDataReader("datafile-33-kB")
-	defer reader.Close()
-
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	if n != int64(bufSize) {
-		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(n), err)
-		return
-	}
-
-	// Read the data back
-	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-
-	st, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-
-	if st.Size != int64(bufSize) {
-		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err)
-		return
-	}
-	if err := r.Close(); err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-	if err := r.Close(); err == nil {
-		logError(testName, function, args, startTime, "", "Object is already closed, should return error", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests removing partially uploaded objects.
-func testRemovePartiallyUploadedV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "RemoveIncompleteUpload(bucketName, objectName)"
-	args := map[string]interface{}{}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
-		return
-	}
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
-
-	reader, writer := io.Pipe()
-	go func() {
-		i := 0
-		for i < 25 {
-			_, cerr := io.CopyN(writer, r, 128*1024)
-			if cerr != nil {
-				logError(testName, function, args, startTime, "", "Copy failed", cerr)
-				return
-			}
-			i++
-			r.Seek(0, 0)
-		}
-		writer.CloseWithError(errors.New("proactively closed to be verified later"))
-	}()
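-	// PutObject with size -1 streams the pipe as a multipart upload; the
-	// CloseWithError above aborts it mid-stream, leaving an incomplete
-	// upload behind for RemoveIncompleteUpload to clean up.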
-
-	objectName := bucketName + "-resumable"
-	args["objectName"] = objectName
-
-	_, err = c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{ContentType: "application/octet-stream"})
-	if err == nil {
-		logError(testName, function, args, startTime, "", "PutObject should fail", err)
-		return
-	}
-	if err.Error() != "proactively closed to be verified later" {
-		logError(testName, function, args, startTime, "", "Unexpected error, expected : proactively closed to be verified later", err)
-		return
-	}
-	err = c.RemoveIncompleteUpload(bucketName, objectName)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "RemoveIncompleteUpload failed", err)
-		return
-	}
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests FPutObject contentType handling, including implicit detection from the file extension.
-func testFPutObjectV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "FPutObject(bucketName, objectName, fileName, opts)"
-	args := map[string]interface{}{
-		"bucketName": "",
-		"objectName": "",
-		"fileName":   "",
-		"opts":       "",
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Make a temp file with 11*1024*1024 bytes of data.
-	file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "TempFile creation failed", err)
-		return
-	}
-
-	r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
-	n, err := io.CopyN(file, r, 11*1024*1024)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Copy failed", err)
-		return
-	}
-	if n != int64(11*1024*1024) {
-		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err)
-		return
-	}
-
-	// Close the file proactively for windows.
-	err = file.Close()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "File close failed", err)
-		return
-	}
-
-	// Set base object name
-	objectName := bucketName + "FPutObject"
-	args["objectName"] = objectName
-	args["fileName"] = file.Name()
-
-	// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
-	n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "FPutObject failed", err)
-		return
-	}
-	if n != int64(11*1024*1024) {
-		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err)
-		return
-	}
-
-	// Perform FPutObject with no contentType provided (Expecting application/octet-stream)
-	args["objectName"] = objectName + "-Octet"
-	args["contentType"] = ""
-
-	n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "FPutObject failed", err)
-		return
-	}
-	if n != int64(11*1024*1024) {
-		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err)
-		return
-	}
-
-	// Add extension to temp file name
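-	// When no ContentType is given, FPutObject derives it from the file
-	// extension, so a ".gtar" suffix should produce "application/x-gtar".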
-	fileName := file.Name()
-	err = os.Rename(file.Name(), fileName+".gtar")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Rename failed", err)
-		return
-	}
-
-	// Perform FPutObject with no contentType provided (Expecting application/x-gtar)
-	args["objectName"] = objectName + "-Octet"
-	args["contentType"] = ""
-	args["fileName"] = fileName + ".gtar"
-
-	n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "FPutObject failed", err)
-		return
-	}
-	if n != int64(11*1024*1024) {
-		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err)
-		return
-	}
-
-	// Check headers
-	rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "StatObject failed", err)
-		return
-	}
-	if rStandard.ContentType != "application/octet-stream" {
-		logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err)
-		return
-	}
-
-	rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "StatObject failed", err)
-		return
-	}
-	if rOctet.ContentType != "application/octet-stream" {
-		logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err)
-		return
-	}
-
-	rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "StatObject failed", err)
-		return
-	}
-	if rGTar.ContentType != "application/x-gtar" {
-		logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	err = os.Remove(fileName + ".gtar")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "File remove failed", err)
-		return
-	}
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests various supported bucket name formats.
-func testMakeBucketRegionsV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "MakeBucket(bucketName, region)"
-	args := map[string]interface{}{
-		"bucketName": "",
-		"region":     "eu-west-1",
-	}
-
-	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
-		ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
-		return
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket in 'eu-west-1'.
-	if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	// Make a new bucket with '.' in its name, in 'us-west-2'. This
-	// request is internally routed using path-style addressing instead
-	// of virtual-host style.
-	if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
-		args["bucketName"] = bucketName + ".withperiod"
-		args["region"] = "us-west-2"
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests get object ReadSeeker interface methods.
-func testGetObjectReadSeekFunctionalV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "GetObject(bucketName, objectName)"
-	args := map[string]interface{}{}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Generate 33K of data.
-	bufSize := dataFileMap["datafile-33-kB"]
-	var reader = getDataReader("datafile-33-kB")
-	defer reader.Close()
-
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	buf, err := ioutil.ReadAll(reader)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll failed", err)
-		return
-	}
-
-	// Save the data.
-	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	if n != int64(bufSize) {
-		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err)
-		return
-	}
-
-	// Read the data back
-	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-
-	st, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-
-	if st.Size != int64(bufSize) {
-		logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err)
-		return
-	}
-
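-	// Exercise all three seek whences: 0 = io.SeekStart, 1 = io.SeekCurrent,
-	// 2 = io.SeekEnd.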
-	offset := int64(2048)
-	n, err = r.Seek(offset, 0)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Seek failed", err)
-		return
-	}
-	if n != offset {
-		logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err)
-		return
-	}
-	n, err = r.Seek(0, 1)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Seek failed", err)
-		return
-	}
-	if n != offset {
-		logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err)
-		return
-	}
-	_, err = r.Seek(offset, 2)
-	if err == nil {
-		logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err)
-		return
-	}
-	n, err = r.Seek(-offset, 2)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Seek failed", err)
-		return
-	}
-	if n != st.Size-offset {
-		logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err)
-		return
-	}
-
-	var buffer1 bytes.Buffer
-	if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
-		if err != io.EOF {
-			logError(testName, function, args, startTime, "", "Copy failed", err)
-			return
-		}
-	}
-	if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
-		logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
-		return
-	}
-
-	// Seek again and read again.
-	n, err = r.Seek(offset-1, 0)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Seek failed", err)
-		return
-	}
-	if n != (offset - 1) {
-		logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err)
-		return
-	}
-
-	var buffer2 bytes.Buffer
-	if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
-		if err != io.EOF {
-			logError(testName, function, args, startTime, "", "Copy failed", err)
-			return
-		}
-	}
-	// Verify the read that now starts one byte earlier.
-	if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
-		logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests get object ReaderAt interface methods.
-func testGetObjectReadAtFunctionalV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "GetObject(bucketName, objectName)"
-	args := map[string]interface{}{}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Generate 33K of data.
-	bufSize := dataFileMap["datafile-33-kB"]
-	var reader = getDataReader("datafile-33-kB")
-	defer reader.Close()
-
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	buf, err := ioutil.ReadAll(reader)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll failed", err)
-		return
-	}
-
-	// Save the data
-	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	if n != int64(bufSize) {
-		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(n), err)
-		return
-	}
-
-	// Read the data back
-	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-
-	st, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-
-	if st.Size != int64(bufSize) {
-		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err)
-		return
-	}
-
-	offset := int64(2048)
-
-	// Read directly
-	buf2 := make([]byte, 512)
-	buf3 := make([]byte, 512)
-	buf4 := make([]byte, 512)
-
-	m, err := r.ReadAt(buf2, offset)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAt failed", err)
-		return
-	}
-	if m != len(buf2) {
-		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err)
-		return
-	}
-	if !bytes.Equal(buf2, buf[offset:offset+512]) {
-		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
-		return
-	}
-	offset += 512
-	m, err = r.ReadAt(buf3, offset)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAt failed", err)
-		return
-	}
-	if m != len(buf3) {
-		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err)
-		return
-	}
-	if !bytes.Equal(buf3, buf[offset:offset+512]) {
-		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
-		return
-	}
-	offset += 512
-	m, err = r.ReadAt(buf4, offset)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAt failed", err)
-		return
-	}
-	if m != len(buf4) {
-		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err)
-		return
-	}
-	if !bytes.Equal(buf4, buf[offset:offset+512]) {
-		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
-		return
-	}
-
-	buf5 := make([]byte, n)
-	// Read the whole object.
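-	// Per the io.ReaderAt contract, a read that ends exactly at EOF may
-	// return io.EOF together with n == len(buf5), so EOF is tolerated here.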
-	m, err = r.ReadAt(buf5, 0)
-	if err != nil {
-		if err != io.EOF {
-			logError(testName, function, args, startTime, "", "ReadAt failed", err)
-			return
-		}
-	}
-	if m != len(buf5) {
-		logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err)
-		return
-	}
-	if !bytes.Equal(buf, buf5) {
-		logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
-		return
-	}
-
-	buf6 := make([]byte, n+1)
-	// Read the whole object and beyond.
-	_, err = r.ReadAt(buf6, 0)
-	if err != nil {
-		if err != io.EOF {
-			logError(testName, function, args, startTime, "", "ReadAt failed", err)
-			return
-		}
-	}
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Tests copy object
-func testCopyObjectV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "CopyObject(destination, source)"
-	args := map[string]interface{}{}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
-	// Make a new bucket in 'us-east-1' (source bucket).
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Make a new bucket in 'us-east-1' (destination bucket).
-	err = c.MakeBucket(bucketName+"-copy", "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Generate 33K of data.
-	bufSize := dataFileMap["datafile-33-kB"]
-	var reader = getDataReader("datafile-33-kB")
-	defer reader.Close()
-
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	if n != int64(bufSize) {
-		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err)
-		return
-	}
-
-	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-	// Check the various fields of source object against destination object.
-	objInfo, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-
-	// Copy Source
-	src := minio.NewSourceInfo(bucketName, objectName, nil)
-	args["source"] = src
-
-	// Set copy conditions.
-
-	// All invalid conditions first.
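-	// time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC) is the zero
-	// time.Time and "" is an empty ETag; both must be rejected.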
-	err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
-	if err == nil {
-		logError(testName, function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err)
-		return
-	}
-	err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
-	if err == nil {
-		logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err)
-		return
-	}
-	err = src.SetMatchETagCond("")
-	if err == nil {
-		logError(testName, function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err)
-		return
-	}
-	err = src.SetMatchETagExceptCond("")
-	if err == nil {
-		logError(testName, function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err)
-		return
-	}
-
-	err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
-	if err != nil {
-		logError(testName, function, args, startTime, "", "SetModifiedSinceCond failed", err)
-		return
-	}
-	err = src.SetMatchETagCond(objInfo.ETag)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "SetMatchETagCond failed", err)
-		return
-	}
-
-	dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
-	args["destination"] = dst
-	if err != nil {
-		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
-		return
-	}
-
-	// Perform the Copy
-	err = c.CopyObject(dst, src)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "CopyObject failed", err)
-		return
-	}
-
-	// Source object
-	r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-	// Destination object
-	readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-	// Check the various fields of source object against destination object.
-	objInfo, err = r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-	objInfoCopy, err := readerCopy.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-	if objInfo.Size != objInfoCopy.Size {
-		logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err)
-		return
-	}
-
-	// CopyObject again but with wrong conditions
-	src = minio.NewSourceInfo(bucketName, objectName, nil)
-	err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
-	if err != nil {
-		logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond failed", err)
-		return
-	}
-	err = src.SetMatchETagExceptCond(objInfo.ETag)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "SetMatchETagExceptCond failed", err)
-		return
-	}
-
-	// Perform the Copy which should fail
-	err = c.CopyObject(dst, src)
-	if err == nil {
-		logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-	if err = cleanupBucket(bucketName+"-copy", c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-	successLogger(testName, function, args, startTime).Info()
-}
-
-func testComposeObjectErrorCasesWrapper(c *minio.Client) {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "ComposeObject(destination, sourceList)"
-	args := map[string]interface{}{}
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
-	// Make a new bucket in 'us-east-1' (source bucket).
-	err := c.MakeBucket(bucketName, "us-east-1")
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Test that more than 10K source objects cannot be
-	// concatenated.
-	srcArr := [10001]minio.SourceInfo{}
-	srcSlice := srcArr[:]
-	dst, err := minio.NewDestinationInfo(bucketName, "object", nil, nil)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
-		return
-	}
-
-	args["destination"] = dst
-	// Just explain about srcArr in args["sourceList"]
-	// to stop having 10,001 null headers logged
-	args["sourceList"] = "source array of 10,001 elements"
-	if err := c.ComposeObject(dst, srcSlice); err == nil {
-		logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err)
-		return
-	} else if err.Error() != "There must be as least one and up to 10000 source objects." {
-		logError(testName, function, args, startTime, "", "Got unexpected error", err)
-		return
-	}
-
-	// Create a source with invalid offset spec and check that
-	// error is returned:
-	// 1. Create the source object.
-	const badSrcSize = 5 * 1024 * 1024
-	buf := bytes.Repeat([]byte("1"), badSrcSize)
-	_, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-	// 2. Set invalid range spec on the object (going beyond
-	// object size)
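-	// SetRange bounds are inclusive, so (1, badSrcSize) points one byte
-	// past the end of the 5MiB object and must be rejected.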
-	badSrc := minio.NewSourceInfo(bucketName, "badObject", nil)
-	err = badSrc.SetRange(1, badSrcSize)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Setting NewSourceInfo failed", err)
-		return
-	}
-	// 3. ComposeObject call should fail.
-	if err := c.ComposeObject(dst, []minio.SourceInfo{badSrc}); err == nil {
-		logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err)
-		return
-	} else if !strings.Contains(err.Error(), "has invalid segment-to-copy") {
-		logError(testName, function, args, startTime, "", "Got invalid error", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test expected error cases
-func testComposeObjectErrorCasesV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "ComposeObject(destination, sourceList)"
-	args := map[string]interface{}{}
-
-	// Instantiate new minio client object
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
-		return
-	}
-
-	testComposeObjectErrorCasesWrapper(c)
-}
-
-func testComposeMultipleSources(c *minio.Client) {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "ComposeObject(destination, sourceList)"
-	args := map[string]interface{}{
-		"destination": "",
-		"sourceList":  "",
-	}
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	// Make a new bucket in 'us-east-1' (source bucket).
-	err := c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Upload a small source object
-	const srcSize = 1024 * 1024 * 5
-	buf := bytes.Repeat([]byte("1"), srcSize)
-	_, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	// We will append 10 copies of the object.
-	srcs := []minio.SourceInfo{}
-	for i := 0; i < 10; i++ {
-		srcs = append(srcs, minio.NewSourceInfo(bucketName, "srcObject", nil))
-	}
-	// make the last part very small
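-	// SetRange bounds are inclusive, so (0, 0) copies exactly one byte.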
-	err = srcs[9].SetRange(0, 0)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "SetRange failed", err)
-		return
-	}
-	args["sourceList"] = srcs
-
-	dst, err := minio.NewDestinationInfo(bucketName, "dstObject", nil, nil)
-	args["destination"] = dst
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
-		return
-	}
-	err = c.ComposeObject(dst, srcs)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ComposeObject failed", err)
-		return
-	}
-
-	objProps, err := c.StatObject(bucketName, "dstObject", minio.StatObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "StatObject failed", err)
-		return
-	}
-
-	if objProps.Size != 9*srcSize+1 {
-		logError(testName, function, args, startTime, "", "Size mismatched! Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err)
-		return
-	}
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test concatenating multiple source objects.
-func testCompose10KSourcesV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "ComposeObject(destination, sourceList)"
-	args := map[string]interface{}{}
-
-	// Instantiate new minio client object
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
-		return
-	}
-
-	testComposeMultipleSources(c)
-}
-
-func testEncryptedCopyObjectWrapper(c *minio.Client) {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "CopyObject(destination, source)"
-	args := map[string]interface{}{}
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	// Make a new bucket in 'us-east-1' (source bucket).
-	err := c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	key1 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven1"), "AES256")
-	key2 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven2"), "AES256")
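-	// SSE-C: the server encrypts the object with the supplied key and requires
-	// the same key headers on every subsequent read or copy of that object.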
-
-	// 1. create an sse-c encrypted object to copy by uploading
-	const srcSize = 1024 * 1024
-	buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
-	metadata := make(map[string]string)
-	for k, v := range key1.GetSSEHeaders() {
-		metadata[k] = v
-	}
-	_, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: metadata, Progress: nil})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject call failed", err)
-		return
-	}
-
-	// 2. copy object and change encryption key
-	src := minio.NewSourceInfo(bucketName, "srcObject", &key1)
-	args["source"] = src
-	dst, err := minio.NewDestinationInfo(bucketName, "dstObject", &key2, nil)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
-		return
-	}
-	args["destination"] = dst
-
-	err = c.CopyObject(dst, src)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "CopyObject failed", err)
-		return
-	}
-
-	// 3. get copied object and check if content is equal
-	opts := minio.GetObjectOptions{}
-	for k, v := range key2.GetSSEHeaders() {
-		opts.Set(k, v)
-	}
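-	// Read back through the Core API with key2's headers set; without them
-	// the server would refuse to serve the SSE-C encrypted object.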
-	coreClient := minio.Core{Client: c}
-	reader, _, err := coreClient.GetObject(bucketName, "dstObject", opts)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-	defer reader.Close()
-
-	decBytes, err := ioutil.ReadAll(reader)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll failed", err)
-		return
-	}
-	if !bytes.Equal(decBytes, buf) {
-		logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
-		return
-	}
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test encrypted copy object
-func testEncryptedCopyObject() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "CopyObject(destination, source)"
-	args := map[string]interface{}{}
-
-	// Instantiate new minio client object
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
-		return
-	}
-
-	// c.TraceOn(os.Stderr)
-	testEncryptedCopyObjectWrapper(c)
-}
-
-// Test encrypted copy object
-func testEncryptedCopyObjectV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "CopyObject(destination, source)"
-	args := map[string]interface{}{}
-
-	// Instantiate new minio client object
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
-		return
-	}
-
-	testEncryptedCopyObjectWrapper(c)
-}
-
-func testUserMetadataCopying() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "CopyObject(destination, source)"
-	args := map[string]interface{}{}
-
-	// Instantiate new minio client object
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	// c.TraceOn(os.Stderr)
-	testUserMetadataCopyingWrapper(c)
-}
-
-func testUserMetadataCopyingWrapper(c *minio.Client) {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "CopyObject(destination, source)"
-	args := map[string]interface{}{}
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	// Make a new bucket in 'us-east-1' (source bucket).
-	err := c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	fetchMeta := func(object string) (h http.Header) {
-		objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{})
-		if err != nil {
-			logError(testName, function, args, startTime, "", "Stat failed", err)
-			return
-		}
-		h = make(http.Header)
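-		// S3 exposes user metadata as x-amz-meta-* response headers; keep
-		// only those so the comparison ignores system headers.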
-		for k, vs := range objInfo.Metadata {
-			if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
-				for _, v := range vs {
-					h.Add(k, v)
-				}
-			}
-		}
-		return h
-	}
-
-	// 1. Create an object carrying user metadata, to be copied below.
-	const srcSize = 1024 * 1024
-	buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
-	metadata := make(http.Header)
-	metadata.Set("x-amz-meta-myheader", "myvalue")
-	m := make(map[string]string)
-	m["x-amz-meta-myheader"] = "myvalue"
-	_, err = c.PutObject(bucketName, "srcObject",
-		bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err)
-		return
-	}
-	if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) {
-		logError(testName, function, args, startTime, "", "Metadata match failed", err)
-		return
-	}
-
-	// 2. create source
-	src := minio.NewSourceInfo(bucketName, "srcObject", nil)
-	// 2.1 create destination with metadata set
-	dst1, err := minio.NewDestinationInfo(bucketName, "dstObject-1", nil, map[string]string{"notmyheader": "notmyvalue"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
-		return
-	}
-
-	// 3. Check that copying with destination metadata set replaces
-	// the source metadata on the copy.
-	args["source"] = src
-	args["destination"] = dst1
-	err = c.CopyObject(dst1, src)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "CopyObject failed", err)
-		return
-	}
-
-	expectedHeaders := make(http.Header)
-	expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
-	if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) {
-		logError(testName, function, args, startTime, "", "Metadata match failed", err)
-		return
-	}
-
-	// 4. create destination with no metadata set and same source
-	dst2, err := minio.NewDestinationInfo(bucketName, "dstObject-2", nil, nil)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
-		return
-	}
-	src = minio.NewSourceInfo(bucketName, "srcObject", nil)
-
-	// 5. Check that copying with no destination metadata set
-	// copies the source metadata.
-	args["source"] = src
-	args["destination"] = dst2
-	err = c.CopyObject(dst2, src)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "CopyObject failed", err)
-		return
-	}
-
-	expectedHeaders = metadata
-	if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) {
-		logError(testName, function, args, startTime, "", "Metadata match failed", err)
-		return
-	}
-
-	// 6. Compose a pair of sources.
-	srcs := []minio.SourceInfo{
-		minio.NewSourceInfo(bucketName, "srcObject", nil),
-		minio.NewSourceInfo(bucketName, "srcObject", nil),
-	}
-	dst3, err := minio.NewDestinationInfo(bucketName, "dstObject-3", nil, nil)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
-		return
-	}
-
-	function = "ComposeObject(destination, sources)"
-	args["source"] = srcs
-	args["destination"] = dst3
-	err = c.ComposeObject(dst3, srcs)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ComposeObject failed", err)
-		return
-	}
-
-	// Check that no headers are copied in this case
-	if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) {
-		logError(testName, function, args, startTime, "", "Metadata match failed", err)
-		return
-	}
-
-	// 7. Compose a pair of sources with dest user metadata set.
-	srcs = []minio.SourceInfo{
-		minio.NewSourceInfo(bucketName, "srcObject", nil),
-		minio.NewSourceInfo(bucketName, "srcObject", nil),
-	}
-	dst4, err := minio.NewDestinationInfo(bucketName, "dstObject-4", nil, map[string]string{"notmyheader": "notmyvalue"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
-		return
-	}
-
-	function = "ComposeObject(destination, sources)"
-	args["source"] = srcs
-	args["destination"] = dst4
-	err = c.ComposeObject(dst4, srcs)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ComposeObject failed", err)
-		return
-	}
-
-	// Check that only the destination metadata is set; source metadata is not copied.
-	expectedHeaders = make(http.Header)
-	expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
-	if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) {
-		logError(testName, function, args, startTime, "", "Metadata match failed", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-func testUserMetadataCopyingV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "CopyObject(destination, source)"
-	args := map[string]interface{}{}
-
-	// Instantiate new minio client object
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
-		return
-	}
-
-	// c.TraceOn(os.Stderr)
-	testUserMetadataCopyingWrapper(c)
-}
-
-func testStorageClassMetadataPutObject() {
-	// initialize logging params
-	startTime := time.Now()
-	function := "testStorageClassMetadataPutObject()"
-	args := map[string]interface{}{}
-	testName := getFuncName()
-
-	// Instantiate new minio client object
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
-		return
-	}
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-	// Make a new bucket in 'us-east-1' (source bucket).
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	fetchMeta := func(object string) (h http.Header) {
-		objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{})
-		if err != nil {
-			logError(testName, function, args, startTime, "", "Stat failed", err)
-			return
-		}
-		h = make(http.Header)
-		for k, vs := range objInfo.Metadata {
-			if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
-				for _, v := range vs {
-					h.Add(k, v)
-				}
-			}
-		}
-		return h
-	}
-
-	metadata := make(http.Header)
-	metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
-
-	const srcSize = 1024 * 1024
-	buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB
-
-	_, err = c.PutObject(bucketName, "srcObjectRRSClass",
-		bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-	if !reflect.DeepEqual(metadata, fetchMeta("srcObjectRRSClass")) {
-		logError(testName, function, args, startTime, "", "Metadata match failed", err)
-		return
-	}
-
-	metadata = make(http.Header)
-	metadata.Set("x-amz-storage-class", "STANDARD")
-
-	_, err = c.PutObject(bucketName, "srcObjectSSClass",
-		bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-	if !reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) {
-		logError(testName, function, args, startTime, "", "Metadata match failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-func testStorageClassInvalidMetadataPutObject() {
-	// initialize logging params
-	startTime := time.Now()
-	function := "testStorageClassInvalidMetadataPutObject()"
-	args := map[string]interface{}{}
-	testName := getFuncName()
-
-	// Instantiate new minio client object
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
-		return
-	}
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-	// Make a new bucket in 'us-east-1' (source bucket).
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	const srcSize = 1024 * 1024
-	buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
-
-	_, err = c.PutObject(bucketName, "srcObjectRRSClass",
-		bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"})
-	if err == nil {
-		logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-func testStorageClassMetadataCopyObject() {
-	// initialize logging params
-	startTime := time.Now()
-	function := "testStorageClassMetadataCopyObject()"
-	args := map[string]interface{}{}
-	testName := getFuncName()
-
-	// Instantiate new minio client object
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err)
-		return
-	}
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-	// Make a new bucket in 'us-east-1' (source bucket).
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	fetchMeta := func(object string) (h http.Header) {
-		objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{})
-		if err != nil {
-			logError(testName, function, args, startTime, "", "Stat failed", err)
-			return
-		}
-		h = make(http.Header)
-		for k, vs := range objInfo.Metadata {
-			if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") {
-				for _, v := range vs {
-					h.Add(k, v)
-				}
-			}
-		}
-		return h
-	}
-
-	metadata := make(http.Header)
-	metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
-
-	const srcSize = 1024 * 1024
-	buf := bytes.Repeat([]byte("abcde"), srcSize)
-
-	// Put an object with RRS Storage class
-	_, err = c.PutObject(bucketName, "srcObjectRRSClass",
-		bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	// Make server side copy of object uploaded in previous step
-	src := minio.NewSourceInfo(bucketName, "srcObjectRRSClass", nil)
-	dst, err := minio.NewDestinationInfo(bucketName, "srcObjectRRSClassCopy", nil, nil)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
-		return
-	}
-	if err = c.CopyObject(dst, src); err != nil {
-		logError(testName, function, args, startTime, "", "CopyObject failed", err)
-		return
-	}
-
-	// Fetch the meta data of copied object
-	if !reflect.DeepEqual(metadata, fetchMeta("srcObjectRRSClassCopy")) {
-		logError(testName, function, args, startTime, "", "Metadata match failed", err)
-		return
-	}
-
-	metadata = make(http.Header)
-	metadata.Set("x-amz-storage-class", "STANDARD")
-
-	// Put an object with Standard Storage class
-	_, err = c.PutObject(bucketName, "srcObjectSSClass",
-		bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	// Make server side copy of object uploaded in previous step
-	src = minio.NewSourceInfo(bucketName, "srcObjectSSClass", nil)
-	dst, err = minio.NewDestinationInfo(bucketName, "srcObjectSSClassCopy", nil, nil)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
-		return
-	}
-	if err = c.CopyObject(dst, src); err != nil {
-		logError(testName, function, args, startTime, "", "CopyObject failed", err)
-		return
-	}
-
-	// Fetch the meta data of copied object
-	if !reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) {
-		logError(testName, function, args, startTime, "", "Metadata match failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test put object with size -1 (unknown length).
-func testPutObjectNoLengthV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "PutObject(bucketName, objectName, reader, size, opts)"
-	args := map[string]interface{}{
-		"bucketName": "",
-		"objectName": "",
-		"size":       -1,
-		"opts":       "",
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	objectName := bucketName + "unique"
-	args["objectName"] = objectName
-
-	bufSize := dataFileMap["datafile-65-MB"]
-	var reader = getDataReader("datafile-65-MB")
-	defer reader.Close()
-	args["size"] = bufSize
-
-	// Upload an object.
-	n, err := c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{})
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
-		return
-	}
-	if n != int64(bufSize) {
-		logError(testName, function, args, startTime, "", "Expected upload object size "+string(bufSize)+" got "+string(n), err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test put objects of unknown size.
-func testPutObjectsUnknownV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "PutObject(bucketName, objectName, reader,size,opts)"
-	args := map[string]interface{}{
-		"bucketName": "",
-		"objectName": "",
-		"size":       "",
-		"opts":       "",
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Uploading multiple objects of unknown size sequentially has been
-	// known to reveal issues (e.g. on machines with 4 GB of RAM).
-	for i := 1; i <= 4; i++ {
-		// Simulate that we could be receiving byte slices of data that we want
-		// to upload as a file
-		rpipe, wpipe := io.Pipe()
-		defer rpipe.Close()
-		go func() {
-			b := []byte("test")
-			wpipe.Write(b)
-			wpipe.Close()
-		}()
-
-		// Upload the object.
-		objectName := fmt.Sprintf("%sunique%d", bucketName, i)
-		args["objectName"] = objectName
-
-		n, err := c.PutObject(bucketName, objectName, rpipe, -1, minio.PutObjectOptions{})
-		if err != nil {
-			logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
-			return
-		}
-		args["size"] = n
-		if n != int64(4) {
-			logError(testName, function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(n), err)
-			return
-		}
-
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test put object with a zero-byte object.
-func testPutObject0ByteV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "PutObject(bucketName, objectName, reader, size, opts)"
-	args := map[string]interface{}{
-		"bucketName": "",
-		"objectName": "",
-		"size":       0,
-		"opts":       "",
-	}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	objectName := bucketName + "unique"
-	args["objectName"] = objectName
-	args["opts"] = minio.PutObjectOptions{}
-
-	// Upload an object.
-	n, err := c.PutObject(bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{})
-
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
-		return
-	}
-	if n != 0 {
-		logError(testName, function, args, startTime, "", "Expected upload object size 0 but got "+string(n), err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-}
-
-// Test expected error cases
-func testComposeObjectErrorCases() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "ComposeObject(destination, sourceList)"
-	args := map[string]interface{}{}
-
-	// Instantiate new minio client object
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	testComposeObjectErrorCasesWrapper(c)
-}
-
-// Test concatenating 10K objects
-func testCompose10KSources() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "ComposeObject(destination, sourceList)"
-	args := map[string]interface{}{}
-
-	// Instantiate new minio client object
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
-		return
-	}
-
-	testComposeMultipleSources(c)
-}
-
-// Tests comprehensive list of all methods.
-func testFunctionalV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "testFunctionalV2()"
-	function_all := ""
-	args := map[string]interface{}{}
-
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
-		return
-	}
-
-	// Enable to debug
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	location := "us-east-1"
-	// Make a new bucket.
-	function = "MakeBucket(bucketName, location)"
-	function_all = "MakeBucket(bucketName, location)"
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"location":   location,
-	}
-	err = c.MakeBucket(bucketName, location)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	// Generate a random file name.
-	fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	file, err := os.Create(fileName)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "file create failed", err)
-		return
-	}
-	for i := 0; i < 3; i++ {
-		buf := make([]byte, rand.Intn(1<<19))
-		_, err = file.Write(buf)
-		if err != nil {
-			logError(testName, function, args, startTime, "", "file write failed", err)
-			return
-		}
-	}
-	file.Close()
-
-	// Verify if bucket exits and you have access.
-	var exists bool
-	function = "BucketExists(bucketName)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-	}
-	exists, err = c.BucketExists(bucketName)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "BucketExists failed", err)
-		return
-	}
-	if !exists {
-		logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err)
-		return
-	}
-
-	// Make the bucket 'public read/write'.
-	function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName":   bucketName,
-		"objectPrefix": "",
-		"bucketPolicy": policy.BucketPolicyReadWrite,
-	}
-	err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
-		return
-	}
-
-	// List all buckets.
-	function = "ListBuckets()"
-	function_all += ", " + function
-	args = nil
-	buckets, err := c.ListBuckets()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ListBuckets failed", err)
-		return
-	}
-	if len(buckets) == 0 {
-		logError(testName, function, args, startTime, "", "List buckets cannot be empty", err)
-		return
-	}
-
-	// Verify if previously created bucket is listed in list buckets.
-	bucketFound := false
-	for _, bucket := range buckets {
-		if bucket.Name == bucketName {
-			bucketFound = true
-		}
-	}
-
-	// If bucket not found error out.
-	if !bucketFound {
-		logError(testName, function, args, startTime, "", "Bucket "+bucketName+"not found", err)
-		return
-	}
-
-	objectName := bucketName + "unique"
-
-	// Generate data
-	buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))
-
-	args = map[string]interface{}{
-		"bucketName":  bucketName,
-		"objectName":  objectName,
-		"contentType": "",
-	}
-	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-	if n != int64(len(buf)) {
-		logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err)
-		return
-	}
-
-	objectName_noLength := objectName + "-nolength"
-	args["objectName"] = objectName_noLength
-	n, err = c.PutObject(bucketName, objectName_noLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	if n != int64(len(buf)) {
-		logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err)
-		return
-	}
-
-	// Instantiate a done channel to close all listing.
-	doneCh := make(chan struct{})
-	defer close(doneCh)
-
-	objFound := false
-	isRecursive := true // Recursive is true.
-	function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName":  bucketName,
-		"objectName":  objectName,
-		"isRecursive": isRecursive,
-	}
-	for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
-		if obj.Key == objectName {
-			objFound = true
-			break
-		}
-	}
-	if !objFound {
-		logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err)
-		return
-	}
-
-	incompObjNotFound := true
-	function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName":  bucketName,
-		"objectName":  objectName,
-		"isRecursive": isRecursive,
-	}
-	for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
-		if objIncompl.Key != "" {
-			incompObjNotFound = false
-			break
-		}
-	}
-	if !incompObjNotFound {
-		logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err)
-		return
-	}
-
-	function = "GetObject(bucketName, objectName)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"objectName": objectName,
-	}
-	newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-
-	newReadBytes, err := ioutil.ReadAll(newReader)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll failed", err)
-		return
-	}
-
-	if !bytes.Equal(newReadBytes, buf) {
-		logError(testName, function, args, startTime, "", "Bytes mismatch", err)
-		return
-	}
-
-	function = "FGetObject(bucketName, objectName, fileName)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"objectName": objectName,
-		"fileName":   fileName + "-f",
-	}
-	err = c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "FgetObject failed", err)
-		return
-	}
-
-	// Generate presigned HEAD object url.
-	function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"objectName": objectName,
-		"expires":    3600 * time.Second,
-	}
-	presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err)
-		return
-	}
-	// Verify if presigned url works.
-	resp, err := http.Head(presignedHeadURL.String())
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err)
-		return
-	}
-	if resp.StatusCode != http.StatusOK {
-		logError(testName, function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err)
-		return
-	}
-	if resp.Header.Get("ETag") == "" {
-		logError(testName, function, args, startTime, "", "Got empty ETag", err)
-		return
-	}
-	resp.Body.Close()
-
-	// Generate presigned GET object url.
-	function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"objectName": objectName,
-		"expires":    3600 * time.Second,
-	}
-	presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
-		return
-	}
-	// Verify if presigned url works.
-	resp, err = http.Get(presignedGetURL.String())
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedGetObject URL GET request failed", err)
-		return
-	}
-	if resp.StatusCode != http.StatusOK {
-		logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
-		return
-	}
-	newPresignedBytes, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll failed", err)
-		return
-	}
-	resp.Body.Close()
-	if !bytes.Equal(newPresignedBytes, buf) {
-		logError(testName, function, args, startTime, "", "Bytes mismatch", err)
-		return
-	}
-
-	// Set request parameters.
-	reqParams := make(url.Values)
-	reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
-	// Generate presigned GET object url.
-	args["reqParams"] = reqParams
-	presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
-		return
-	}
-	// Verify if presigned url works.
-	resp, err = http.Get(presignedGetURL.String())
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedGetObject URL GET request failed", err)
-		return
-	}
-	if resp.StatusCode != http.StatusOK {
-		logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
-		return
-	}
-	newPresignedBytes, err = ioutil.ReadAll(resp.Body)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll failed", err)
-		return
-	}
-	if !bytes.Equal(newPresignedBytes, buf) {
-		logError(testName, function, args, startTime, "", "Bytes mismatch", err)
-		return
-	}
-	// Verify content disposition.
-	if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
-		logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err)
-		return
-	}
-
-	function = "PresignedPutObject(bucketName, objectName, expires)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"objectName": objectName + "-presigned",
-		"expires":    3600 * time.Second,
-	}
-	presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
-		return
-	}
-
-	// Generate data more than 32K
-	buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024)
-
-	req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
-	if err != nil {
-		logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err)
-		return
-	}
-	httpClient := &http.Client{
-		// Setting a sensible time out of 30secs to wait for response
-		// headers. Request is pro-actively cancelled after 30secs
-		// with no response.
-		Timeout:   30 * time.Second,
-		Transport: http.DefaultTransport,
-	}
-	resp, err = httpClient.Do(req)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err)
-		return
-	}
-
-	function = "GetObject(bucketName, objectName)"
-	function_all += ", " + function
-	args = map[string]interface{}{
-		"bucketName": bucketName,
-		"objectName": objectName + "-presigned",
-	}
-	newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-
-	newReadBytes, err = ioutil.ReadAll(newReader)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll failed", err)
-		return
-	}
-
-	if !bytes.Equal(newReadBytes, buf) {
-		logError(testName, function, args, startTime, "", "Bytes mismatch", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	if err = os.Remove(fileName); err != nil {
-		logError(testName, function, args, startTime, "", "File remove failed", err)
-		return
-	}
-	if err = os.Remove(fileName + "-f"); err != nil {
-		logError(testName, function, args, startTime, "", "File removes failed", err)
-		return
-	}
-	successLogger(testName, function_all, args, startTime).Info()
-}
-
-// Test get object with GetObjectWithContext
-func testGetObjectWithContext() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "GetObjectWithContext(ctx, bucketName, objectName)"
-	args := map[string]interface{}{
-		"ctx":        "",
-		"bucketName": "",
-		"objectName": "",
-	}
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	bufSize := dataFileMap["datafile-33-kB"]
-	var reader = getDataReader("datafile-33-kB")
-	defer reader.Close()
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	_, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
-	args["ctx"] = ctx
-	defer cancel()
-
-	r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err)
-		return
-	}
-	if _, err = r.Stat(); err == nil {
-		logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err)
-		return
-	}
-
-	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
-	args["ctx"] = ctx
-	defer cancel()
-
-	// Read the data back
-	r, err = c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObjectWithContext failed", err)
-		return
-	}
-
-	st, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "object Stat call failed", err)
-		return
-	}
-	if st.Size != int64(bufSize) {
-		logError(testName, function, args, startTime, "", "Number of bytes in stat does not match: want "+string(bufSize)+", got"+string(st.Size), err)
-		return
-	}
-	if err := r.Close(); err != nil {
-		logError(testName, function, args, startTime, "", "object Close() call failed", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Test get object with FGetObjectWithContext
-func testFGetObjectWithContext() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "FGetObjectWithContext(ctx, bucketName, objectName, fileName)"
-	args := map[string]interface{}{
-		"ctx":        "",
-		"bucketName": "",
-		"objectName": "",
-		"fileName":   "",
-	}
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV4(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	bufSize := dataFileMap["datafile-1-MB"]
-	var reader = getDataReader("datafile-1-MB")
-	defer reader.Close()
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	_, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject failed", err)
-		return
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
-	args["ctx"] = ctx
-	defer cancel()
-
-	fileName := "tempfile-context"
-	args["fileName"] = fileName
-	// Read the data back
-	err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
-	if err == nil {
-		logError(testName, function, args, startTime, "", "FGetObjectWithContext should fail on short timeout", err)
-		return
-	}
-	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
-	defer cancel()
-
-	// Read the data back
-	err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "FGetObjectWithContext with long timeout failed", err)
-		return
-	}
-	if err = os.Remove(fileName + "-fcontext"); err != nil {
-		logError(testName, function, args, startTime, "", "Remove file failed", err)
-		return
-	}
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Test validates putObject with context to see if request cancellation is honored for V2.
-func testPutObjectWithContextV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "PutObjectWithContext(ctx, bucketName, objectName, reader, size, opts)"
-	args := map[string]interface{}{
-		"ctx":        "",
-		"bucketName": "",
-		"objectName": "",
-		"size":       "",
-		"opts":       "",
-	}
-	// Instantiate new minio client object.
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Make a new bucket.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-	defer c.RemoveBucket(bucketName)
-	bufSize := dataFileMap["datatfile-33-kB"]
-	var reader = getDataReader("datafile-33-kB")
-	defer reader.Close()
-
-	objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
-	args["objectName"] = objectName
-
-	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
-	args["ctx"] = ctx
-	args["size"] = bufSize
-	defer cancel()
-
-	_, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObjectWithContext with short timeout failed", err)
-		return
-	}
-
-	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
-	args["ctx"] = ctx
-
-	defer cancel()
-	reader = getDataReader("datafile-33-kB")
-	defer reader.Close()
-	_, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObjectWithContext with long timeout failed", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Test get object with GetObjectWithContext
-func testGetObjectWithContextV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "GetObjectWithContext(ctx, bucketName, objectName)"
-	args := map[string]interface{}{
-		"ctx":        "",
-		"bucketName": "",
-		"objectName": "",
-	}
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	bufSize := dataFileMap["datafile-33-kB"]
-	var reader = getDataReader("datafile-33-kB")
-	defer reader.Close()
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	_, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject call failed", err)
-		return
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
-	args["ctx"] = ctx
-	defer cancel()
-
-	r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err)
-		return
-	}
-	if _, err = r.Stat(); err == nil {
-		logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err)
-		return
-	}
-
-	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
-	defer cancel()
-
-	// Read the data back
-	r, err = c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObjectWithContext shouldn't fail on longer timeout", err)
-		return
-	}
-
-	st, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "object Stat call failed", err)
-		return
-	}
-	if st.Size != int64(bufSize) {
-		logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(bufSize)+" got "+string(st.Size), err)
-		return
-	}
-	if err := r.Close(); err != nil {
-		logError(testName, function, args, startTime, "", " object Close() call failed", err)
-		return
-	}
-
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Test get object with FGetObjectWithContext
-func testFGetObjectWithContextV2() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "FGetObjectWithContext(ctx, bucketName, objectName,fileName)"
-	args := map[string]interface{}{
-		"ctx":        "",
-		"bucketName": "",
-		"objectName": "",
-		"fileName":   "",
-	}
-	// Seed random based on current time.
-	rand.Seed(time.Now().Unix())
-
-	// Instantiate new minio client object.
-	c, err := minio.NewV2(
-		os.Getenv(serverEndpoint),
-		os.Getenv(accessKey),
-		os.Getenv(secretKey),
-		mustParseBool(os.Getenv(enableHTTPS)),
-	)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
-		return
-	}
-
-	// Enable tracing, write to stderr.
-	// c.TraceOn(os.Stderr)
-
-	// Set user agent.
-	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-	args["bucketName"] = bucketName
-
-	// Make a new bucket.
-	err = c.MakeBucket(bucketName, "us-east-1")
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
-		return
-	}
-
-	bufSize := dataFileMap["datatfile-1-MB"]
-	var reader = getDataReader("datafile-1-MB")
-	defer reader.Close()
-	// Save the data
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	args["objectName"] = objectName
-
-	_, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PutObject call failed", err)
-		return
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
-	args["ctx"] = ctx
-	defer cancel()
-
-	fileName := "tempfile-context"
-	args["fileName"] = fileName
-
-	// Read the data back
-	err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
-	if err == nil {
-		logError(testName, function, args, startTime, "", "FGetObjectWithContext should fail on short timeout", err)
-		return
-	}
-	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
-	defer cancel()
-
-	// Read the data back
-	err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "FGetObjectWithContext call shouldn't fail on long timeout", err)
-		return
-	}
-
-	if err = os.Remove(fileName + "-fcontext"); err != nil {
-		logError(testName, function, args, startTime, "", "Remove file failed", err)
-		return
-	}
-	// Delete all objects and buckets
-	if err = cleanupBucket(bucketName, c); err != nil {
-		logError(testName, function, args, startTime, "", "Cleanup failed", err)
-		return
-	}
-
-	successLogger(testName, function, args, startTime).Info()
-
-}
-
-// Convert string to bool and always return false if any error
-func mustParseBool(str string) bool {
-	b, err := strconv.ParseBool(str)
-	if err != nil {
-		return false
-	}
-	return b
-}
-
-func main() {
-	// Output to stdout instead of the default stderr
-	log.SetOutput(os.Stdout)
-	// create custom formatter
-	mintFormatter := mintJSONFormatter{}
-	// set custom formatter
-	log.SetFormatter(&mintFormatter)
-	// log Info or above -- success cases are Info level, failures are Fatal level
-	log.SetLevel(log.InfoLevel)
-
-	tls := mustParseBool(os.Getenv(enableHTTPS))
-	// execute tests
-	if !isQuickMode() {
-		testMakeBucketErrorV2()
-		testGetObjectClosedTwiceV2()
-		testRemovePartiallyUploadedV2()
-		testFPutObjectV2()
-		testMakeBucketRegionsV2()
-		testGetObjectReadSeekFunctionalV2()
-		testGetObjectReadAtFunctionalV2()
-		testCopyObjectV2()
-		testFunctionalV2()
-		testComposeObjectErrorCasesV2()
-		testCompose10KSourcesV2()
-		testUserMetadataCopyingV2()
-		testPutObject0ByteV2()
-		testPutObjectNoLengthV2()
-		testPutObjectsUnknownV2()
-		testGetObjectWithContextV2()
-		testFPutObjectWithContextV2()
-		testFGetObjectWithContextV2()
-		testPutObjectWithContextV2()
-		testMakeBucketError()
-		testMakeBucketRegions()
-		testPutObjectWithMetadata()
-		testPutObjectReadAt()
-		testPutObjectStreaming()
-		testListPartiallyUploaded()
-		testGetObjectSeekEnd()
-		testGetObjectClosedTwice()
-		testRemoveMultipleObjects()
-		testRemovePartiallyUploaded()
-		testFPutObjectMultipart()
-		testFPutObject()
-		testGetObjectReadSeekFunctional()
-		testGetObjectReadAtFunctional()
-		testPresignedPostPolicy()
-		testCopyObject()
-		testEncryptionPutGet()
-		testEncryptionFPut()
-		testComposeObjectErrorCases()
-		testCompose10KSources()
-		testUserMetadataCopying()
-		testBucketNotification()
-		testFunctional()
-		testGetObjectModified()
-		testPutObjectUploadSeekedObject()
-		testGetObjectWithContext()
-		testFPutObjectWithContext()
-		testFGetObjectWithContext()
-		testPutObjectWithContext()
-		testStorageClassMetadataPutObject()
-		testStorageClassInvalidMetadataPutObject()
-		testStorageClassMetadataCopyObject()
-
-		// SSE-C tests will only work over a TLS connection.
-		if tls {
-			testEncryptedCopyObjectV2()
-			testEncryptedCopyObject()
-		}
-	} else {
-		testFunctional()
-		testFunctionalV2()
-	}
-}

+ 0 - 71
vendor/github.com/minio/minio-go/hook-reader.go

@@ -1,71 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import "io"
-
-// hookReader hooks an additional reader into the source stream. It is
-// useful for making progress bars: the second reader is notified about
-// the exact number of bytes read from the primary source on each Read
-// operation.
-type hookReader struct {
-	source io.Reader
-	hook   io.Reader
-}
-
-// Seek implements io.Seeker. It seeks the source if the source
-// implements io.Seeker; otherwise it tries the hook.
-func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) {
-	// Verify for source has embedded Seeker, use it.
-	sourceSeeker, ok := hr.source.(io.Seeker)
-	if ok {
-		return sourceSeeker.Seek(offset, whence)
-	}
-	// Verify if hook has embedded Seeker, use it.
-	hookSeeker, ok := hr.hook.(io.Seeker)
-	if ok {
-		return hookSeeker.Seek(offset, whence)
-	}
-	return n, nil
-}
-
-// Read implements io.Reader. It always reads from the source; the 'n'
-// bytes actually read are then replayed through the hook. Any error
-// other than io.EOF is returned as-is.
-func (hr *hookReader) Read(b []byte) (n int, err error) {
-	n, err = hr.source.Read(b)
-	if err != nil && err != io.EOF {
-		return n, err
-	}
-	// Progress the hook with the total read bytes from the source.
-	if _, herr := hr.hook.Read(b[:n]); herr != nil {
-		if herr != io.EOF {
-			return n, herr
-		}
-	}
-	return n, err
-}
-
-// newHook returns an io.Reader (a hookReader) that reports the data
-// read from the source to the hook.
-func newHook(source, hook io.Reader) io.Reader {
-	if hook == nil {
-		return source
-	}
-	return &hookReader{source, hook}
-}
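
The progress-bar use mentioned in the hookReader comment can be sketched briefly. This is a hypothetical example, written as if it lived in the same package as hookReader; the byteCounter type and countBytes helper are invented for illustration and assume only the io import already present in this file:

// byteCounter is a hypothetical hook: an io.Reader whose only job is
// to count the bytes that hookReader.Read replays into it.
type byteCounter struct{ n int64 }

func (b *byteCounter) Read(p []byte) (int, error) {
	b.n += int64(len(p))
	return len(p), nil
}

// countBytes drains src through newHook and reports how many bytes the
// hook observed; a real progress bar would render inside Read instead
// of just counting.
func countBytes(src io.Reader) (int64, error) {
	counter := &byteCounter{}
	hooked := newHook(src, counter)
	buf := make([]byte, 32*1024)
	for {
		if _, err := hooked.Read(buf); err != nil {
			if err == io.EOF {
				return counter.n, nil
			}
			return counter.n, err
		}
	}
}

With a bytes.Reader source, countBytes returns exactly the source length, since the hook sees every chunk the source produces.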

+ 0 - 89
vendor/github.com/minio/minio-go/pkg/credentials/chain.go

@@ -1,89 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package credentials
-
-// A Chain will search for a provider which returns credentials
-// and cache that provider until Retrieve is called again.
-//
-// The Chain provides a way of chaining multiple providers together
-// which will pick the first available using priority order of the
-// Providers in the list.
-//
-// If none of the Providers retrieve a valid credentials Value, Chain's
-// Retrieve() will return the anonymous (no credentials) value.
-//
-// If a Provider is found which returns a valid credentials Value, Chain
-// will cache that Provider for all calls to IsExpired(), until Retrieve is
-// called again after IsExpired() is true.
-//
-//     creds := credentials.NewChainCredentials(
-//         []credentials.Provider{
-//             &credentials.EnvAWSS3{},
-//             &credentials.EnvMinio{},
-//         })
-//
-//     // Usage of ChainCredentials.
-//     mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
-//     if err != nil {
-//          log.Fatalln(err)
-//     }
-//
-type Chain struct {
-	Providers []Provider
-	curr      Provider
-}
-
-// NewChainCredentials returns a pointer to a new Credentials object
-// wrapping a chain of providers.
-func NewChainCredentials(providers []Provider) *Credentials {
-	return New(&Chain{
-		Providers: append([]Provider{}, providers...),
-	})
-}
-
-// Retrieve returns the credentials value; it returns no credentials
-// (anonymous) if no credentials provider returned any value.
-//
-// If a provider is found with credentials, it will be cached and any calls
-// to IsExpired() will return the expired state of the cached provider.
-func (c *Chain) Retrieve() (Value, error) {
-	for _, p := range c.Providers {
-		creds, _ := p.Retrieve()
-		// Always prioritize non-anonymous providers, if any.
-		if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
-			continue
-		}
-		c.curr = p
-		return creds, nil
-	}
-	// At this point we have exhausted all the providers and
-	// are left without any credentials; return anonymous.
-	return Value{
-		SignerType: SignatureAnonymous,
-	}, nil
-}
-
-// IsExpired will return the expired state of the currently cached provider
-// if there is one. If there is no current provider, true will be returned.
-func (c *Chain) IsExpired() bool {
-	if c.curr != nil {
-		return c.curr.IsExpired()
-	}
-
-	return true
-}
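
A minimal sketch of the priority order described above, as if written in the same credentials package; stubProvider is a hypothetical Provider defined only for this example:

// stubProvider is a hypothetical Provider returning a fixed Value.
type stubProvider struct{ v Value }

func (s stubProvider) Retrieve() (Value, error) { return s.v, nil }
func (s stubProvider) IsExpired() bool          { return false }

// chainPriorityExample shows that Chain.Retrieve skips anonymous
// results and settles on the first provider with real keys.
func chainPriorityExample() (Value, error) {
	creds := NewChainCredentials([]Provider{
		stubProvider{}, // anonymous (empty) value: skipped
		stubProvider{v: Value{
			AccessKeyID:     "AKID",   // placeholder key
			SecretAccessKey: "SECRET", // placeholder secret
		}},
	})
	return creds.Get() // AccessKeyID will be "AKID"
}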

+ 0 - 175
vendor/github.com/minio/minio-go/pkg/credentials/credentials.go

@@ -1,175 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package credentials
-
-import (
-	"sync"
-	"time"
-)
-
-// A Value is the AWS credentials value for individual credential fields.
-type Value struct {
-	// AWS Access key ID
-	AccessKeyID string
-
-	// AWS Secret Access Key
-	SecretAccessKey string
-
-	// AWS Session Token
-	SessionToken string
-
-	// Signature Type.
-	SignerType SignatureType
-}
-
-// A Provider is the interface for any component which will provide credentials
-// Value. A provider is required to manage its own Expired state, and what to
-// be expired means.
-type Provider interface {
-	// Retrieve returns nil if it successfully retrieved the value.
-	// An error is returned if the value was not obtainable, or was empty.
-	Retrieve() (Value, error)
-
-	// IsExpired returns if the credentials are no longer valid, and need
-	// to be retrieved.
-	IsExpired() bool
-}
-
-// An Expiry provides shared expiration logic to be used by credentials
-// providers to implement expiry functionality.
-//
-// The best method to use this struct is as an anonymous field within the
-// provider's struct.
-//
-// Example:
-//     type IAMCredentialProvider struct {
-//         Expiry
-//         ...
-//     }
-type Expiry struct {
-	// The date/time when to expire on
-	expiration time.Time
-
-	// If set will be used by IsExpired to determine the current time.
-	// Defaults to time.Now if CurrentTime is not set.
-	CurrentTime func() time.Time
-}
-
-// SetExpiration sets the expiration IsExpired will check when called.
-//
-// If window is greater than 0 the expiration time will be reduced by the
-// window value.
-//
-// Using a window is helpful to trigger credentials to expire sooner than
-// the expiration time given to ensure no requests are made with expired
-// tokens.
-func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
-	e.expiration = expiration
-	if window > 0 {
-		e.expiration = e.expiration.Add(-window)
-	}
-}
-
-// IsExpired returns if the credentials are expired.
-func (e *Expiry) IsExpired() bool {
-	if e.CurrentTime == nil {
-		e.CurrentTime = time.Now
-	}
-	return e.expiration.Before(e.CurrentTime())
-}
-
-// Credentials - A container for synchronous safe retrieval of credentials Value.
-// Credentials will cache the credentials value until they expire. Once the value
-// expires the next Get will attempt to retrieve valid credentials.
-//
-// Credentials is safe to use across multiple goroutines and will manage the
-// synchronous state so the Providers do not need to implement their own
-// synchronization.
-//
-// The first Credentials.Get() will always call Provider.Retrieve() to get the
-// first instance of the credentials Value. All calls to Get() after that
-// will return the cached credentials Value until IsExpired() returns true.
-type Credentials struct {
-	sync.Mutex
-
-	creds        Value
-	forceRefresh bool
-	provider     Provider
-}
-
-// New returns a pointer to a new Credentials with the provider set.
-func New(provider Provider) *Credentials {
-	return &Credentials{
-		provider:     provider,
-		forceRefresh: true,
-	}
-}
-
-// Get returns the credentials value, or error if the credentials Value failed
-// to be retrieved.
-//
-// Will return the cached credentials Value if it has not expired. If the
-// credentials Value has expired the Provider's Retrieve() will be called
-// to refresh the credentials.
-//
-// If Credentials.Expire() was called the credentials Value will be force
-// expired, and the next call to Get() will cause them to be refreshed.
-func (c *Credentials) Get() (Value, error) {
-	c.Lock()
-	defer c.Unlock()
-
-	if c.isExpired() {
-		creds, err := c.provider.Retrieve()
-		if err != nil {
-			return Value{}, err
-		}
-		c.creds = creds
-		c.forceRefresh = false
-	}
-
-	return c.creds, nil
-}
-
-// Expire expires the credentials and forces them to be retrieved on the
-// next call to Get().
-//
-// This will override the Provider's expired state, and force Credentials
-// to call the Provider's Retrieve().
-func (c *Credentials) Expire() {
-	c.Lock()
-	defer c.Unlock()
-
-	c.forceRefresh = true
-}
-
-// IsExpired returns if the credentials are no longer valid, and need
-// to be refreshed.
-//
-// If the Credentials were forced to be expired with Expire() this will
-// reflect that override.
-func (c *Credentials) IsExpired() bool {
-	c.Lock()
-	defer c.Unlock()
-
-	return c.isExpired()
-}
-
-// isExpired helper method wrapping the definition of expired credentials.
-func (c *Credentials) isExpired() bool {
-	return c.forceRefresh || c.provider.IsExpired()
-}

+ 0 - 62
vendor/github.com/minio/minio-go/pkg/credentials/doc.go

@@ -1,62 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Package credentials provides credential retrieval and management
-// for S3 compatible object storage.
-//
-// By default the Credentials.Get() will cache the successful result of a
-// Provider's Retrieve() until Provider.IsExpired() returns true. At which
-// point Credentials will call Provider's Retrieve() to get new credential Value.
-//
-// The Provider is responsible for determining when credentials have expired.
-// It is also important to note that Credentials will always call Retrieve the
-// first time Credentials.Get() is called.
-//
-// Example of using the environment variable credentials.
-//
-//     creds := NewFromEnv()
-//     // Retrieve the credentials value
-//     credValue, err := creds.Get()
-//     if err != nil {
-//         // handle error
-//     }
-//
-// Example of forcing credentials to expire and be refreshed on the next Get().
-// This may be helpful to proactively expire credentials and refresh them sooner
-// than they would naturally expire on their own.
-//
-//     creds := NewFromIAM("")
-//     creds.Expire()
-//     credsValue, err := creds.Get()
-//     // New credentials will be retrieved instead of from cache.
-//
-//
-// Custom Provider
-//
-// Each Provider built into this package also provides a helper method to generate
-// a Credentials pointer setup with the provider. To use a custom Provider just
-// create a type which satisfies the Provider interface and pass it to the
-// NewCredentials method.
-//
-//     type MyProvider struct{}
-//     func (m *MyProvider) Retrieve() (Value, error) {...}
-//     func (m *MyProvider) IsExpired() bool {...}
-//
-//     creds := NewCredentials(&MyProvider{})
-//     credValue, err := creds.Get()
-//
-package credentials

+ 0 - 71
vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go

@@ -1,71 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package credentials
-
-import "os"
-
-// An EnvAWS retrieves credentials from the environment variables of the
-// running process. Environment credentials never expire.
-//
-// Environment variables used:
-//
-// * Access Key ID:     AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
-// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
-// * Session Token:     AWS_SESSION_TOKEN.
-type EnvAWS struct {
-	retrieved bool
-}
-
-// NewEnvAWS returns a pointer to a new Credentials object
-// wrapping the environment variable provider.
-func NewEnvAWS() *Credentials {
-	return New(&EnvAWS{})
-}
-
-// Retrieve retrieves the keys from the environment.
-func (e *EnvAWS) Retrieve() (Value, error) {
-	e.retrieved = false
-
-	id := os.Getenv("AWS_ACCESS_KEY_ID")
-	if id == "" {
-		id = os.Getenv("AWS_ACCESS_KEY")
-	}
-
-	secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
-	if secret == "" {
-		secret = os.Getenv("AWS_SECRET_KEY")
-	}
-
-	signerType := SignatureV4
-	if id == "" || secret == "" {
-		signerType = SignatureAnonymous
-	}
-
-	e.retrieved = true
-	return Value{
-		AccessKeyID:     id,
-		SecretAccessKey: secret,
-		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
-		SignerType:      signerType,
-	}, nil
-}
-
-// IsExpired returns true if the credentials have not yet been retrieved.
-func (e *EnvAWS) IsExpired() bool {
-	return !e.retrieved
-}
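
A minimal usage sketch for this provider, as if in the same package; the environment variable names come from the comment above and the values are placeholders:

// envAWSExample reads AWS-style environment credentials. If neither
// AWS_ACCESS_KEY_ID nor AWS_ACCESS_KEY is set, the returned Value has
// SignerType == SignatureAnonymous instead of SignatureV4.
func envAWSExample() (Value, error) {
	os.Setenv("AWS_ACCESS_KEY_ID", "AKID")       // placeholder
	os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET") // placeholder
	return NewEnvAWS().Get()                     // SignerType will be SignatureV4
}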

Some files were not shown because too many files changed in this diff